Fix warnings on Visual Studio (Tencent#1456)
* Fix warning C4244 in src/layer/convolution.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/x86/convolution_sgemm_int8.h

C4244: 'initializing': conversion from 'double' to 'int', possible loss of data

* Fix warning C4244 in src/layer/deconvolution.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/elu.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4267 in src/layer/embed.cpp

C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data

* Fix warning C4244 in src/layer/exp.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/innerproduct.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/log.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data
C4244: 'initializing': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/lrn.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/mvn.cpp

C4244: 'initializing': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/power.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warnings C4244 and C4267 in src/layer/proposal.cpp

C4244: 'initializing': conversion from 'double' to 'float', possible loss of data
C4244: 'initializing': conversion from 'double' to 'int', possible loss of data
C4267: 'argument': conversion from 'size_t' to 'int', possible loss of data
C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data

* Fix warning C4244 in src/layer/reduction.cpp

C4244: 'return': conversion from 'double' to 'T', possible loss of data

* Fix warning C4244 in src/layer/tanh.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warning C4244 in src/layer/binaryop.cpp

C4244: '=': conversion from 'double' to 'float', possible loss of data

* Fix warnings C4244 and C4267 in src/layer/unaryop.cpp

C4244: 'return': conversion from 'double' to 'T', possible loss of data
C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data

* Fix warning C4244 in src/layer/x86/convolutiondepthwise_3x3_int8.h

C4244: 'initializing': conversion from 'double' to 'int', possible loss of data
chosungmann authored and nihui committed Dec 26, 2019
1 parent 821caf2 commit c62e270
Showing 17 changed files with 49 additions and 49 deletions.
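All of these fixes follow the same pattern: MSVC flags an implicit narrowing conversion (double to float, double to int, or size_t to int), and the commit makes the narrowing explicit with a static_cast. Below is a minimal sketch of the two warning classes and the fix, for reference only — it is not code from the repository, and whether the double overload of a math function is selected depends on headers and target.

```cpp
#include <cmath>
#include <vector>

// C4244: if exp() resolves to the double-returning overload, assigning the
// expression to a float narrows it and MSVC warns.
float sigmoid_warns(float x)
{
    return 1.f / (1.f + exp(-x)); // may trigger C4244
}

float sigmoid_fixed(float x)
{
    return static_cast<float>(1.f / (1.f + exp(-x))); // narrowing is explicit, no warning
}

// C4267: size() returns size_t, which is wider than int on 64-bit targets.
int count_warns(const std::vector<float>& v)
{
    int n = v.size(); // triggers C4267 on x64
    return n;
}

int count_fixed(const std::vector<float>& v)
{
    return static_cast<int>(v.size()); // narrowing is explicit, no warning
}
```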
2 changes: 1 addition & 1 deletion src/layer/binaryop.cpp
@@ -443,7 +443,7 @@ struct binary_op_pow {

template<typename T>
struct binary_op_pow {
- T operator() (const T& x, const T& y) const { return pow(x, y); }
+ T operator() (const T& x, const T& y) const { return static_cast<T>(pow(x, y)); }
};

template<typename T>
2 changes: 1 addition & 1 deletion src/layer/convolution.cpp
@@ -581,7 +581,7 @@ int Convolution::forward(const Mat& bottom_blob, Mat& top_blob, const Option& op
}
else if (activation_type == 4)
{
- sum = 1.f / (1.f + exp(-sum));
+ sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}

outptr[j] = sum;
2 changes: 1 addition & 1 deletion src/layer/deconvolution.cpp
@@ -197,7 +197,7 @@ int Deconvolution::forward(const Mat& bottom_blob, Mat& top_blob, const Option&

for (int i = 0; i < size; i++)
{
- outptr[i] = 1.f / (1.f + exp(-outptr[i]));
+ outptr[i] = static_cast<float>(1.f / (1.f + exp(-outptr[i])));
}
}
}
2 changes: 1 addition & 1 deletion src/layer/elu.cpp
@@ -47,7 +47,7 @@ int ELU::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
for (int i=0; i<size; i++)
{
if (ptr[i] < 0.f)
- ptr[i] = alpha * (exp(ptr[i]) - 1.f);
+ ptr[i] = static_cast<float>(alpha * (exp(ptr[i]) - 1.f));
}
}

2 changes: 1 addition & 1 deletion src/layer/embed.cpp
@@ -53,7 +53,7 @@ int Embed::load_model(const ModelBin& mb)

int Embed::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
- int words = bottom_blob.total();
+ int words = static_cast<int>(bottom_blob.total());

top_blob.create(num_output, words, 4u, opt.blob_allocator);
if (top_blob.empty())
4 changes: 2 additions & 2 deletions src/layer/exp.cpp
@@ -50,7 +50,7 @@ int Exp::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = exp(shift + ptr[i] * scale);
+ ptr[i] = static_cast<float>(exp(shift + ptr[i] * scale));
}
}
}
@@ -63,7 +63,7 @@ int Exp::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = pow(base, (shift + ptr[i] * scale));
+ ptr[i] = static_cast<float>(pow(base, (shift + ptr[i] * scale)));
}
}
}
2 changes: 1 addition & 1 deletion src/layer/innerproduct.cpp
@@ -293,7 +293,7 @@ int InnerProduct::forward(const Mat& bottom_blob, Mat& top_blob, const Option& o
}
else if (activation_type == 4)
{
- sum = 1.f / (1.f + exp(-sum));
+ sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}

top_blob[p] = sum;
6 changes: 3 additions & 3 deletions src/layer/log.cpp
@@ -50,13 +50,13 @@ int Log::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = log(shift + ptr[i] * scale);
+ ptr[i] = static_cast<float>(log(shift + ptr[i] * scale));
}
}
}
else
{
- float log_base_inv = 1.f / log(base);
+ float log_base_inv = static_cast<float>(1.f / log(base));

#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<channels; q++)
@@ -65,7 +65,7 @@ int Log::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = log(shift + ptr[i] * scale) * log_base_inv;
+ ptr[i] = static_cast<float>(log(shift + ptr[i] * scale) * log_base_inv);
}
}
}
4 changes: 2 additions & 2 deletions src/layer/lrn.cpp
@@ -92,7 +92,7 @@ int LRN::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
float* ptr = bottom_top_blob.channel(q);
for (int i=0; i<size; i++)
{
- ptr[i] = ptr[i] * pow(bias + alpha_div_size * ssptr[i], -beta);
+ ptr[i] = static_cast<float>(ptr[i] * pow(bias + alpha_div_size * ssptr[i], -beta));
}
}
}
@@ -158,7 +158,7 @@ int LRN::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
ss += val;
}

- ptr[j] = ptr[j] * pow(bias + alpha_div_size * ss, -beta);
+ ptr[j] = static_cast<float>(ptr[j] * pow(bias + alpha_div_size * ss, -beta));
}

ptr += outw;
4 changes: 2 additions & 2 deletions src/layer/mvn.cpp
@@ -137,7 +137,7 @@ int MVN::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
sqmean = sqmean / (channels * size);

// normalize variance
- float norm_var = sqrt(sqmean) + eps;
+ float norm_var = static_cast<float>(sqrt(sqmean) + eps);
float norm_var_inv = 1.f / norm_var;

// apply normalize_variance
@@ -160,7 +160,7 @@ int MVN::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
float* outptr = top_blob.channel(q);
float sqmean = sqsum[q] / size;
- float norm_var = sqrt(sqmean) + eps;
+ float norm_var = static_cast<float>(sqrt(sqmean) + eps);
float norm_var_inv = 1.f / norm_var;

for (int i=0; i<size; i++)
2 changes: 1 addition & 1 deletion src/layer/power.cpp
@@ -48,7 +48,7 @@ int Power::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = pow((shift + ptr[i] * scale), power);
+ ptr[i] = static_cast<float>(pow((shift + ptr[i] * scale), power));
}
}

22 changes: 11 additions & 11 deletions src/layer/proposal.cpp
@@ -53,8 +53,8 @@ static Mat generate_anchors(int base_size, const Mat& ratios, const Mat& scales)
{
float ar = ratios[i];

- int r_w = round(base_size / sqrt(ar));
- int r_h = round(r_w * ar);//round(base_size * sqrt(ar));
+ int r_w = static_cast<int>(round(base_size / sqrt(ar)));
+ int r_h = static_cast<int>(round(r_w * ar));//round(base_size * sqrt(ar));

for (int j = 0; j < num_scale; j++)
{
@@ -153,17 +153,17 @@ static void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& sco
if (datas.empty() || scores.empty())
return;

- qsort_descent_inplace(datas, scores, 0, scores.size() - 1);
+ qsort_descent_inplace(datas, scores, 0, static_cast<int>(scores.size() - 1));
}

- static void nms_sorted_bboxes(const std::vector<Rect>& bboxes, std::vector<int>& picked, float nms_threshold)
+ static void nms_sorted_bboxes(const std::vector<Rect>& bboxes, std::vector<size_t>& picked, float nms_threshold)
{
picked.clear();

- const int n = bboxes.size();
+ const size_t n = bboxes.size();

std::vector<float> areas(n);
- for (int i = 0; i < n; i++)
+ for (size_t i = 0; i < n; i++)
{
const Rect& r = bboxes[i];

@@ -173,12 +173,12 @@ static void nms_sorted_bboxes(const std::vector<Rect>& bboxes, std::vector<int>&
areas[i] = width * height;
}

- for (int i = 0; i < n; i++)
+ for (size_t i = 0; i < n; i++)
{
const Rect& a = bboxes[i];

int keep = 1;
- for (int j = 0; j < (int)picked.size(); j++)
+ for (size_t j = 0; j < picked.size(); j++)
{
const Rect& b = bboxes[picked[j]];

@@ -248,8 +248,8 @@ int Proposal::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& to
float pb_cx = cx + anchor_w * dx;
float pb_cy = cy + anchor_h * dy;

- float pb_w = anchor_w * exp(dw);
- float pb_h = anchor_h * exp(dh);
+ float pb_w = static_cast<float>(anchor_w * exp(dw));
+ float pb_h = static_cast<float>(anchor_h * exp(dh));

pb[0] = pb_cx - pb_w * 0.5f;
pb[1] = pb_cy - pb_h * 0.5f;
@@ -328,7 +328,7 @@ int Proposal::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& to
}

// apply nms with nms_thresh
- std::vector<int> picked;
+ std::vector<size_t> picked;
nms_sorted_bboxes(proposal_boxes, picked, nms_thresh);

// take after_nms_topN
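Note that proposal.cpp combines two remedies. Where an int is still required by the callee, the size_t value is cast explicitly (the qsort_descent_inplace call); where the index only feeds container access, the index and the picked vector are widened to size_t so no conversion happens at all. A hedged sketch of the two styles with hypothetical helper names:

```cpp
#include <cstddef>
#include <vector>

// Style 1: keep the int interface and cast the size_t value explicitly.
int last_index(const std::vector<float>& scores)
{
    // Caller guarantees scores is non-empty, as in the original code.
    return static_cast<int>(scores.size() - 1); // explicit narrowing silences C4267
}

// Style 2: use size_t end to end, so there is nothing to narrow.
std::vector<size_t> keep_every_box(const std::vector<float>& areas)
{
    std::vector<size_t> picked;
    for (size_t i = 0; i < areas.size(); i++) // index type matches size()
        picked.push_back(i);
    return picked;
}
```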
8 changes: 4 additions & 4 deletions src/layer/reduction.cpp
@@ -753,17 +753,17 @@ struct post_process_identity {

template<typename T>
struct post_process_sqrt {
- T operator() (const T& x) const { return sqrt(x); }
+ T operator() (const T& x) const { return static_cast<T>(sqrt(x)); }
};

template<typename T>
struct post_process_log {
- T operator() (const T& x) const { return log(x); }
+ T operator() (const T& x) const { return static_cast<T>(log(x)); }
};

template<typename T>
struct reduction_op_asum {
- T operator() (const T& x, const T& y) const { return x + fabs(y); }
+ T operator() (const T& x, const T& y) const { return static_cast<T>(x + fabs(y)); }
};

template<typename T>
@@ -773,7 +773,7 @@ struct reduction_op_sumsq {

template<typename T>
struct reduction_op_sumsexp {
- T operator() (const T& x, const T& y) const { return x + exp(y); }
+ T operator() (const T& x, const T& y) const { return static_cast<T>(x + exp(y)); }
};

template<typename T>
2 changes: 1 addition & 1 deletion src/layer/tanh.cpp
@@ -39,7 +39,7 @@ int TanH::forward_inplace(Mat& bottom_top_blob, const Option& opt) const

for (int i=0; i<size; i++)
{
- ptr[i] = tanh(ptr[i]);
+ ptr[i] = static_cast<float>(tanh(ptr[i]));
}
}

30 changes: 15 additions & 15 deletions src/layer/unaryop.cpp
@@ -38,7 +38,7 @@ static int unary_op_inplace(Mat& a, const Option& opt)
{
Op op;

- int size = a.total();
+ int size = static_cast<int>(a.total());

#pragma omp parallel for num_threads(opt.num_threads)
for (int i=0; i<size; i++)
@@ -51,7 +51,7 @@ static int unary_op_inplace(Mat& a, const Option& opt)

template<typename T>
struct unary_op_abs {
- T operator() (const T& x) const { return fabs(x); }
+ T operator() (const T& x) const { return static_cast<T>(fabs(x)); }
};

template<typename T>
@@ -61,12 +61,12 @@ struct unary_op_neg {

template<typename T>
struct unary_op_floor {
- T operator() (const T& x) const { return floor(x); }
+ T operator() (const T& x) const { return static_cast<T>(floor(x)); }
};

template<typename T>
struct unary_op_ceil {
- T operator() (const T& x) const { return ceil(x); }
+ T operator() (const T& x) const { return static_cast<T>(ceil(x)); }
};

template<typename T>
@@ -76,52 +76,52 @@ struct unary_op_square {

template<typename T>
struct unary_op_sqrt {
- T operator() (const T& x) const { return sqrt(x); }
+ T operator() (const T& x) const { return static_cast<T>(sqrt(x)); }
};

template<typename T>
struct unary_op_rsqrt {
- T operator() (const T& x) const { return 1.f / sqrt(x); }
+ T operator() (const T& x) const { return static_cast<T>(1.f / sqrt(x)); }
};

template<typename T>
struct unary_op_exp {
- T operator() (const T& x) const { return exp(x); }
+ T operator() (const T& x) const { return static_cast<T>(exp(x)); }
};

template<typename T>
struct unary_op_log {
- T operator() (const T& x) const { return log(x); }
+ T operator() (const T& x) const { return static_cast<T>(log(x)); }
};

template<typename T>
struct unary_op_sin {
- T operator() (const T& x) const { return sin(x); }
+ T operator() (const T& x) const { return static_cast<T>(sin(x)); }
};

template<typename T>
struct unary_op_cos {
- T operator() (const T& x) const { return cos(x); }
+ T operator() (const T& x) const { return static_cast<T>(cos(x)); }
};

template<typename T>
struct unary_op_tan {
- T operator() (const T& x) const { return tan(x); }
+ T operator() (const T& x) const { return static_cast<T>(tan(x)); }
};

template<typename T>
struct unary_op_asin {
- T operator() (const T& x) const { return asin(x); }
+ T operator() (const T& x) const { return static_cast<T>(asin(x)); }
};

template<typename T>
struct unary_op_acos {
- T operator() (const T& x) const { return acos(x); }
+ T operator() (const T& x) const { return static_cast<T>(acos(x)); }
};

template<typename T>
struct unary_op_atan {
- T operator() (const T& x) const { return atan(x); }
+ T operator() (const T& x) const { return static_cast<T>(atan(x)); }
};

template<typename T>
Expand All @@ -131,7 +131,7 @@ struct unary_op_reciprocal {

template<typename T>
struct unary_op_tanh {
- T operator() (const T& x) const { return tanh(x); }
+ T operator() (const T& x) const { return static_cast<T>(tanh(x)); }
};

int UnaryOp::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
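In the templated functors the cast target is T rather than float: UnaryOp instantiates them with T = float, and the unqualified C math calls (fabs, sqrt, exp, tanh, ...) can resolve to double-returning overloads, so static_cast<T> makes the narrowing explicit while remaining a no-op for T = double. A standalone sketch of the idea, with a hypothetical functor name:

```cpp
#include <cmath>

template<typename T>
struct unary_op_exp_sketch {
    // exp() may promote x to double and return double; casting back to T
    // silences C4244 when T is float and changes nothing when T is double.
    T operator() (const T& x) const { return static_cast<T>(exp(x)); }
};

int main()
{
    unary_op_exp_sketch<float> op_f;
    unary_op_exp_sketch<double> op_d;
    float a = op_f(1.0f);  // ~2.718f, no narrowing warning
    double b = op_d(1.0);  // behaviour unchanged for double
    return (a > 2.f && b > 2.0) ? 0 : 1;
}
```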
2 changes: 1 addition & 1 deletion src/layer/x86/convolution_sgemm_int8.h
@@ -14,7 +14,7 @@

static inline signed char float2int8(float v)
{
- int int32 = round(v);
+ int int32 = static_cast<int>(round(v));
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
2 changes: 1 addition & 1 deletion src/layer/x86/convolutiondepthwise_3x3_int8.h
@@ -14,7 +14,7 @@

static inline signed char float2int8(float v)
{
- int int32 = round(v);
+ int int32 = static_cast<int>(round(v));
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
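The two float2int8 helpers are identical; after the fix they read as an explicit round-then-saturate conversion. A small usage sketch, for illustration only:

```cpp
#include <cmath>

static inline signed char float2int8(float v)
{
    int int32 = static_cast<int>(round(v)); // round() yields a double in the original build; the cast makes the narrowing explicit (C4244)
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

int main()
{
    signed char a = float2int8(3.4f);   // rounds to 3
    signed char b = float2int8(200.f);  // saturates to 127
    signed char c = float2int8(-200.f); // saturates to -127
    return (a == 3 && b == 127 && c == -127) ? 0 : 1;
}
```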
