summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>2020-01-02 15:49:00 +0900
committerGitHub Enterprise <noreply-CODE@samsung.com>2020-01-02 15:49:00 +0900
commit5aae072f9e448791b69fd511631553f1d5c7aeca (patch)
tree2d862d263a9c41f2cae44f433128485a909948c3
parentaddf02ade48d0f072ba4b1e8d6c19743b967e150 (diff)
downloadnnfw-5aae072f9e448791b69fd511631553f1d5c7aeca.tar.gz
nnfw-5aae072f9e448791b69fd511631553f1d5c7aeca.tar.bz2
nnfw-5aae072f9e448791b69fd511631553f1d5c7aeca.zip
[srcn] Build with strict option (#9781)
- Enable the strict build option for the srcn kernel. - Fix build errors surfaced by the strict build option. Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
-rw-r--r--compute/ncnn/CMakeLists.txt1
-rw-r--r--compute/ncnn/src/layer/binaryop.cc11
-rw-r--r--compute/ncnn/src/layer/instance_norm.cc6
-rw-r--r--compute/ncnn/src/srcn/conv_sparse.h4
-rw-r--r--compute/ncnn/src/srcn/conv_winograd.cc8
-rw-r--r--compute/ncnn/src/srcn/conv_winograd_batch.cc6
-rw-r--r--compute/ncnn/src/srcn/deconv_sgemm_multithreads.cc2
-rw-r--r--compute/ncnn/src/srcn/depthwise_conv.cc7
-rw-r--r--compute/ncnn/src/srcn/sgemm_singlethread.cc4
-rw-r--r--compute/ncnn/src/srcn/srcn_conv.cc10
10 files changed, 33 insertions, 26 deletions
diff --git a/compute/ncnn/CMakeLists.txt b/compute/ncnn/CMakeLists.txt
index eb7cfe4e2..a8f50120f 100644
--- a/compute/ncnn/CMakeLists.txt
+++ b/compute/ncnn/CMakeLists.txt
@@ -28,6 +28,7 @@ if(NOT TARGET OpenMP::OpenMP_CXX)
endif()
target_link_libraries(nnfw_lib_srcn PRIVATE OpenMP::OpenMP_CXX)
+target_link_libraries(nnfw_lib_srcn PRIVATE nnfw_common)
target_compile_definitions(nnfw_lib_srcn PRIVATE TIZEN) # ANDROID or TIZEN
#target_compile_definitions(nnfw_lib_srcn PRIVATE NCNN) # Enable if ready
set_target_properties(nnfw_lib_srcn PROPERTIES POSITION_INDEPENDENT_CODE ON)
diff --git a/compute/ncnn/src/layer/binaryop.cc b/compute/ncnn/src/layer/binaryop.cc
index af37200c1..a09d55f78 100644
--- a/compute/ncnn/src/layer/binaryop.cc
+++ b/compute/ncnn/src/layer/binaryop.cc
@@ -420,7 +420,7 @@ int ncnn_binary_op(const BinaryOpParam &param, const Mat &bottom_blob, const Mat
{
int ret = 0;
auto op_type = param.op_type;
- auto b = param.b;
+ // auto b = param.b;
// Only support add operation, none broadcasting
// Other case, need to remove internal memory allocation and check correctness
@@ -1566,10 +1566,11 @@ int ncnn_binary_op_inplace(const BinaryOpParam &param, Mat &bottom_blob, Mat &bo
int channels = bottom_blob.c;
int size = w * h;
- int w1 = bottom_blob1.w;
- int h1 = bottom_blob1.h;
- int channels1 = bottom_blob1.c;
- int size1 = w1 * h1;
+// Unused variables
+// int w1 = bottom_blob1.w;
+// int h1 = bottom_blob1.h;
+// int channels1 = bottom_blob1.c;
+// int size1 = w1 * h1;
#if __ARM_NEON
diff --git a/compute/ncnn/src/layer/instance_norm.cc b/compute/ncnn/src/layer/instance_norm.cc
index 0e115c888..08c3f2c23 100644
--- a/compute/ncnn/src/layer/instance_norm.cc
+++ b/compute/ncnn/src/layer/instance_norm.cc
@@ -139,7 +139,7 @@ void ncnn_instance_norm_rowmajor(Mat &in_mat, Mat &out_mat, Mat &gamma_mat, Mat
}
void ncnn_instance_norm_colmajor(Mat &in_mat, Mat &out_mat, Mat &gamma_mat, Mat &beta_mat,
- int channels, float eps)
+ int /*channels*/, float eps)
{
// Treat CHW layout as HWC layout
int h = in_mat.c;
@@ -189,7 +189,7 @@ void ncnn_instance_norm_colmajor(Mat &in_mat, Mat &out_mat, Mat &gamma_mat, Mat
}
void ncnn_instance_norm_with_relu_rowmajor(Mat &in_mat, Mat &out_mat, Mat &gamma_mat, Mat &beta_mat,
- int channels, float eps, float slope)
+ int channels, float eps, float /*slope*/)
{
int w = in_mat.w;
int h = in_mat.h;
@@ -301,7 +301,7 @@ void ncnn_instance_norm_with_relu_rowmajor(Mat &in_mat, Mat &out_mat, Mat &gamma
}
void ncnn_instance_norm_with_relu_colmajor(Mat &in_mat, Mat &out_mat, Mat &gamma_mat, Mat &beta_mat,
- int channels, float eps, float slope)
+ int /*channels*/, float eps, float slope)
{
// Treat CHW layout as HWC layout
int h = in_mat.c;
diff --git a/compute/ncnn/src/srcn/conv_sparse.h b/compute/ncnn/src/srcn/conv_sparse.h
index a9b3c741d..7ac358fd8 100644
--- a/compute/ncnn/src/srcn/conv_sparse.h
+++ b/compute/ncnn/src/srcn/conv_sparse.h
@@ -61,8 +61,8 @@ private:
int num_threads_;
convType_t conv_type_;
- int n_;
- int bn_;
+ uint32_t n_;
+ uint32_t bn_;
int rn_;
int nn_;
diff --git a/compute/ncnn/src/srcn/conv_winograd.cc b/compute/ncnn/src/srcn/conv_winograd.cc
index cc114981f..69649ea2a 100644
--- a/compute/ncnn/src/srcn/conv_winograd.cc
+++ b/compute/ncnn/src/srcn/conv_winograd.cc
@@ -247,11 +247,11 @@ void conv_winograd::winograd_output_col2im(const float *col_buff)
void conv_winograd::compute_winograd()
{
- const int w = in_mat_.w;
- const int h = in_mat_.h;
+ // const int w = in_mat_.w;
+ // const int h = in_mat_.h;
const int inch = in_mat_.c;
- const int outw = out_mat_.w;
- const int outh = out_mat_.h;
+ // const int outw = out_mat_.w;
+ // const int outh = out_mat_.h;
const int outch = out_mat_.c;
const int kernel_size = in_param_.kernel_w;
diff --git a/compute/ncnn/src/srcn/conv_winograd_batch.cc b/compute/ncnn/src/srcn/conv_winograd_batch.cc
index 7b468db02..cba45c648 100644
--- a/compute/ncnn/src/srcn/conv_winograd_batch.cc
+++ b/compute/ncnn/src/srcn/conv_winograd_batch.cc
@@ -200,10 +200,10 @@ void conv_winograd_batch::winograd_output_col2im(const float *col_buff)
void conv_winograd_batch::compute_winograd()
{
const int w = in_mat_.w;
- const int h = in_mat_.h;
+ // const int h = in_mat_.h;
const int inch = in_mat_.c;
- const int outw = out_mat_.w;
- const int outh = out_mat_.h;
+ // const int outw = out_mat_.w;
+ // const int outh = out_mat_.h;
const int outch = out_mat_.c;
const int kernel_size = in_param_.kernel_w;
const int batch = in_mat_.n;
diff --git a/compute/ncnn/src/srcn/deconv_sgemm_multithreads.cc b/compute/ncnn/src/srcn/deconv_sgemm_multithreads.cc
index 77042bcfa..f3ccf13e5 100644
--- a/compute/ncnn/src/srcn/deconv_sgemm_multithreads.cc
+++ b/compute/ncnn/src/srcn/deconv_sgemm_multithreads.cc
@@ -130,7 +130,7 @@ deconv_sgemm_multithreads::deconv_sgemm_multithreads(const convMat_t &in_mat,
convType_t conv_type)
: in_mat_(in_mat), weights_mat_(weights_mat), out_mat_(out_mat), in_param_(in_param),
- num_threads_(num_threads), conv_type_(conv_type)
+ conv_type_(conv_type), num_threads_(num_threads)
{
m_ = in_param_.kernel_h * in_param_.kernel_w * out_mat_.c;
#ifdef NCNN
diff --git a/compute/ncnn/src/srcn/depthwise_conv.cc b/compute/ncnn/src/srcn/depthwise_conv.cc
index 3ae3aa3ab..74d799336 100644
--- a/compute/ncnn/src/srcn/depthwise_conv.cc
+++ b/compute/ncnn/src/srcn/depthwise_conv.cc
@@ -69,7 +69,7 @@ static void depthwise_conv3x3S1_nopad(const convMat_t &in_mat, convMat_t &out_ma
int i;
for (i = 0; i + 1 < outh; i += 2)
{
- int nn = outw >> 2 - 1;
+ int nn = (outw >> 2) - 1;
int remain = outw & 0x03;
if (nn > 0)
@@ -2591,6 +2591,11 @@ static void depthwise_conv_colmajor(const convMat_t &in_mat, convMat_t &out_mat,
}
}
}
+#else // __aarch64__
+ (void)in_mat;
+ (void)out_mat;
+ (void)kernel;
+ (void)in_param;
#endif // __aarch64__
}
diff --git a/compute/ncnn/src/srcn/sgemm_singlethread.cc b/compute/ncnn/src/srcn/sgemm_singlethread.cc
index f9b9f45a9..3de3e1214 100644
--- a/compute/ncnn/src/srcn/sgemm_singlethread.cc
+++ b/compute/ncnn/src/srcn/sgemm_singlethread.cc
@@ -135,8 +135,8 @@ sgemm_singlethread::sgemm_singlethread(sgemmType_t major_type, sgemmTrans_t ltra
sgemmTrans_t rtrans, const int m, const int n, const int k,
const float *lhs_data, const float *rhs_data,
float *res_data, int cache_div)
- : major_type_(major_type), ltrans_(ltrans), rtrans_(rtrans), m_(m), n_(n), k_(k),
- lhs_data_(lhs_data), rhs_data_(rhs_data), res_data_(res_data), cache_div_(cache_div)
+ : lhs_data_(lhs_data), rhs_data_(rhs_data), res_data_(res_data), major_type_(major_type),
+ ltrans_(ltrans), rtrans_(rtrans), m_(m), n_(n), k_(k), cache_div_(cache_div)
{
param_init();
}
diff --git a/compute/ncnn/src/srcn/srcn_conv.cc b/compute/ncnn/src/srcn/srcn_conv.cc
index 822336f39..bb8e4f13e 100644
--- a/compute/ncnn/src/srcn/srcn_conv.cc
+++ b/compute/ncnn/src/srcn/srcn_conv.cc
@@ -98,7 +98,7 @@ float *trans_weight2winograd(winogradParams_t &params, unsigned int *size = NULL
if (params.num_threads > 1)
{
winograd_channel_cond = 128 * 128;
- int winograd_image_cond = 20 * 20;
+ // int winograd_image_cond = 20 * 20;
}
#endif // TIZEN
@@ -274,8 +274,8 @@ void srcn_convolution2D(const convMat_t &in_mat, const convMat_t &weights_mat, c
return;
}
- const int ih = (_H - 1) * in_param.stride_w + in_param.kernel_w;
- const int oh = _H;
+ // const int ih = (_H - 1) * in_param.stride_w + in_param.kernel_w;
+ // const int oh = _H;
const int nh = (outh + _H - 1) / _H;
int rh = outh % _H;
if (rh == 0)
@@ -366,8 +366,8 @@ void srcn_convolution2D(const convMat_t &in_mat, const convMat_t &weights_mat, c
return;
}
- const int ih = (_H - 1) * in_param.stride_w + in_param.kernel_w;
- const int oh = _H;
+ // const int ih = (_H - 1) * in_param.stride_w + in_param.kernel_w;
+ // const int oh = _H;
const int nh = (outh + _H - 1) / _H;
int rh = outh % _H;
if (rh == 0)