Skip to content

Commit

Permalink
增加layer中的check方法
Browse files Browse the repository at this point in the history
  • Loading branch information
zjhellofss committed May 27, 2024
1 parent a74906d commit bf2866e
Show file tree
Hide file tree
Showing 4 changed files with 99 additions and 68 deletions.
3 changes: 3 additions & 0 deletions include/layer/abstract/layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,9 @@ class Layer<float> {
public:
explicit Layer(std::string layer_name) : layer_name_(std::move(layer_name)) {}

virtual StatusCode Check(const std::vector<sftensor>& inputs,
const std::vector<sftensor>& outputs);

/**
* @brief Performs forward inference
*
Expand Down
8 changes: 8 additions & 0 deletions source/layer/abstract/layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,9 +85,17 @@ StatusCode Layer<float>::Forward() {

StatusCode status =
runtime_operator->layer->Forward(layer_input_datas, output_operand_datas->datas);
if (status != StatusCode::kSuccess) {
LOG(ERROR) << "Forward the layer " << runtime_operator->name << " get a error status";
}
return status;
}

// Default validation hook invoked before inference. Concrete layers (e.g.
// BaseConvolutionLayer) override this to verify `inputs`/`outputs` and their
// own configuration; the base class performs no checks and simply reports
// that the method is not implemented. Both parameters are intentionally
// unused here.
StatusCode Layer<float>::Check(const std::vector<sftensor>& inputs,
const std::vector<sftensor>& outputs) {
return StatusCode::kFunctionNotImplement;
}

void Layer<float>::set_runtime_operator(const std::shared_ptr<RuntimeOperator>& runtime_operator) {
CHECK(runtime_operator != nullptr);
this->runtime_operator_ = runtime_operator;
Expand Down
152 changes: 84 additions & 68 deletions source/layer/details/base_convolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include "base_convolution.hpp"
#include "convolution.hpp"
#include "deconvolution.hpp"
#include "layer/abstract/layer.hpp"
#include "status_code.hpp"
namespace kuiper_infer {
BaseConvolutionLayer::BaseConvolutionLayer(ConvType conv_type, uint32_t output_channel,
Expand Down Expand Up @@ -93,82 +94,17 @@ void BaseConvolutionLayer::AddBias(arma::fmat& output, uint32_t bias_index) cons

StatusCode BaseConvolutionLayer::Forward(const std::vector<std::shared_ptr<Tensor<float>>>& inputs,
std::vector<std::shared_ptr<Tensor<float>>>& outputs) {
if (inputs.empty()) {
LOG(ERROR) << "The input tensor array in the convolution layer is empty";
return StatusCode::kInferInputsEmpty;
}

if (outputs.empty()) {
LOG(ERROR) << "The output tensor array in the convolution layer is empty";
return StatusCode::kInferOutputsEmpty;
}
StatusCode check_code = Check(inputs, outputs);

if (inputs.size() != outputs.size()) {
LOG(ERROR) << "The input and output tensor array size of the convolution "
"layer do not match";
return StatusCode::kInferDimMismatch;
}

if (weights_.empty()) {
LOG(ERROR) << "The number of kernel matrix in the convolution layer should "
"be greater than zero";
return StatusCode::kInferParameterError;
}

if (this->use_bias_ && this->bias_.size() != this->weights_.size()) {
LOG(ERROR) << "The number of kernel matrix and bias matrix do not match";
return StatusCode::kInferParameterError;
}

if (!stride_h_ || !stride_w_) {
LOG(ERROR) << "The stride in the convolution layer should be greater "
"than zero";
return StatusCode::kInferParameterError;
}

if (!dilation_h_ || !dilation_w_) {
LOG(ERROR) << "The dilation in the convolution layer should be greater "
"than zero";
return StatusCode::kInferParameterError;
}

if (!groups_) {
LOG(ERROR) << "The group number in the convolution layer should be "
"greater than zero ";
return StatusCode::kInferParameterError;
}

if (conv_type_ == ConvType::kOpConv) {
if (output_padding_h_ != 0 || output_padding_w_ != 0) {
LOG(ERROR) << "The output padding in the convolution layer should be zero ";
return StatusCode::kInferParameterError;
}
if (check_code != StatusCode::kSuccess) {
return check_code;
}

const uint32_t kernel_count = this->weights_.size();
if (!kernel_count) {
LOG(ERROR) << "The size of kernel matrix in the convolution layer should be greater "
"than zero";
return StatusCode::kInferParameterError;
}

const uint32_t kernel_h = this->weights_.at(0)->rows();
const uint32_t kernel_w = this->weights_.at(0)->cols();
const uint32_t kernel_channel = this->weights_.at(0)->channels();

if (!kernel_h || !kernel_w || !kernel_channel) {
LOG(ERROR) << "The size of kernel matrix in the convolution layer should be greater "
"than zero";
return StatusCode::kInferParameterError;
}

for (uint32_t k = 0; k < kernel_count; ++k) {
const std::shared_ptr<Tensor<float>>& kernel = this->weights_.at(k);
CHECK(kernel->rows() == kernel_h);
CHECK(kernel->cols() == kernel_w);
CHECK(kernel->channels() == kernel_channel);
}

if (kernel_matrix_arr_.size() != kernel_count) {
InitIm2ColWeight();
}
Expand Down Expand Up @@ -443,4 +379,84 @@ StatusCode BaseConvolutionLayer::CreateInstance(const std::shared_ptr<RuntimeOpe
return StatusCode::kSuccess;
}

// Validates the tensor arrays and this layer's configuration before a
// Forward pass runs. Returns StatusCode::kSuccess when everything is
// consistent, otherwise the error code describing the first problem found.
// Kernel-dimension uniformity violations abort via CHECK because they
// indicate a programming error rather than a recoverable runtime condition.
StatusCode BaseConvolutionLayer::Check(const std::vector<sftensor>& inputs,
                                       const std::vector<sftensor>& outputs) {
  if (inputs.empty()) {
    LOG(ERROR) << "The input tensor array in the convolution layer is empty";
    return StatusCode::kInferInputsEmpty;
  }

  if (outputs.empty()) {
    LOG(ERROR) << "The output tensor array in the convolution layer is empty";
    return StatusCode::kInferOutputsEmpty;
  }

  // One output tensor is produced per input tensor (batch-wise pairing).
  if (inputs.size() != outputs.size()) {
    LOG(ERROR) << "The input and output tensor array size of the convolution "
                  "layer do not match";
    return StatusCode::kInferDimMismatch;
  }

  if (weights_.empty()) {
    LOG(ERROR) << "The number of kernel matrix in the convolution layer should "
                  "be greater than zero";
    return StatusCode::kInferParameterError;
  }

  // When bias is enabled, each kernel must have a matching bias entry.
  if (this->use_bias_ && this->bias_.size() != this->weights_.size()) {
    LOG(ERROR) << "The number of kernel matrix and bias matrix do not match";
    return StatusCode::kInferParameterError;
  }

  if (!stride_h_ || !stride_w_) {
    LOG(ERROR) << "The stride in the convolution layer should be greater "
                  "than zero";
    return StatusCode::kInferParameterError;
  }

  if (!dilation_h_ || !dilation_w_) {
    LOG(ERROR) << "The dilation in the convolution layer should be greater "
                  "than zero";
    return StatusCode::kInferParameterError;
  }

  if (!groups_) {
    LOG(ERROR) << "The group number in the convolution layer should be "
                  "greater than zero ";
    return StatusCode::kInferParameterError;
  }

  // Output padding is only meaningful for deconvolution; a plain convolution
  // must not set it.
  if (conv_type_ == ConvType::kOpConv) {
    if (output_padding_h_ != 0 || output_padding_w_ != 0) {
      LOG(ERROR) << "The output padding in the convolution layer should be zero ";
      return StatusCode::kInferParameterError;
    }
  }

  // weights_.empty() was rejected above, so kernel_count is guaranteed to be
  // greater than zero here (the former `if (!kernel_count)` branch was
  // unreachable and has been removed).
  const uint32_t kernel_count = this->weights_.size();
  const uint32_t kernel_h = this->weights_.at(0)->rows();
  const uint32_t kernel_w = this->weights_.at(0)->cols();
  const uint32_t kernel_channel = this->weights_.at(0)->channels();

  if (!kernel_h || !kernel_w || !kernel_channel) {
    LOG(ERROR) << "The size of kernel matrix in the convolution layer should be greater "
                  "than zero";
    return StatusCode::kInferParameterError;
  }

  // Every kernel must share the dimensions of the first one; a mismatch is a
  // construction-time bug, so CHECK aborts instead of returning a status.
  for (uint32_t k = 0; k < kernel_count; ++k) {
    const std::shared_ptr<Tensor<float>>& kernel = this->weights_.at(k);
    CHECK(kernel->rows() == kernel_h);
    CHECK(kernel->cols() == kernel_w);
    CHECK(kernel->channels() == kernel_channel);
  }
  return StatusCode::kSuccess;
}

} // namespace kuiper_infer
4 changes: 4 additions & 0 deletions source/layer/details/base_convolution.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,10 @@ class BaseConvolutionLayer : public ParamLayer {
uint32_t kernel_h,
uint32_t kernel_w) const = 0;

public:
  // Validates the input/output tensor arrays and the convolution
  // configuration before Forward runs; see the virtual declaration in
  // Layer<float>. Marked `override` so the compiler verifies this signature
  // stays in sync with the base class.
  StatusCode Check(const std::vector<sftensor>& inputs,
                   const std::vector<sftensor>& outputs) override;

private:
virtual void InitIm2ColWeight();

protected:
Expand Down

0 comments on commit bf2866e

Please sign in to comment.