Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add -Wextra and remove a lot of warnings for C code #8916

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions 3rdparty/stb/include/stb_image_write.h
Original file line number Diff line number Diff line change
Expand Up @@ -1260,6 +1260,8 @@ static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitC
if(c == 255) {
stbiw__putc(s, 0);
}
// FIXME: https://github.com/nothings/stb/issues/1433
bitBuf &= 0x0000FFFF;
bitBuf <<= 8;
bitCnt -= 8;
}
Expand Down
9 changes: 8 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,14 @@ NVCC=nvcc
OPTS=-Ofast
LDFLAGS= -lm -pthread
COMMON= -Iinclude/ -I3rdparty/stb/include
CFLAGS=-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -rdynamic
CFLAGS=-Wall -Wno-unused-parameter -Wno-unknown-pragmas -fPIC -rdynamic

ifeq ($(USE_CPP), 1)
# C is not C++
CFLAGS+=-fpermissive -Wno-write-strings
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is `-fpermissive` really necessary here?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess one could avoid it, but then one cannot write idiomatic C. I would recommend removing the USE_CPP flag anyway — what is the benefit? The claim that C++ does better type checking than C has not been true for a long time.

else
CFLAGS+=-Wextra
endif

ifeq ($(DEBUG), 1)
#OPTS= -O0 -g
Expand Down
12 changes: 6 additions & 6 deletions include/darknet.h
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,7 @@ struct layer {

tree *softmax_tree;

size_t workspace_size;
ssize_t workspace_size;

//#ifdef GPU
int *indexes_gpu;
Expand Down Expand Up @@ -704,7 +704,7 @@ typedef enum {
typedef struct network {
int n;
int batch;
uint64_t *seen;
int64_t *seen;
float *badlabels_reject_threshold;
float *delta_rolling_max;
float *delta_rolling_avg;
Expand Down Expand Up @@ -818,8 +818,8 @@ typedef struct network {
float **truth_gpu;
float **input16_gpu;
float **output16_gpu;
size_t *max_input16_size;
size_t *max_output16_size;
ssize_t *max_input16_size;
ssize_t *max_output16_size;
int wait_stream;

void *cuda_graph;
Expand All @@ -829,11 +829,11 @@ typedef struct network {

float *global_delta_gpu;
float *state_delta_gpu;
size_t max_delta_gpu_size;
ssize_t max_delta_gpu_size;
//#endif // GPU
int optimized_memory;
int dynamic_minibatch;
size_t workspace_size_limit;
ssize_t workspace_size_limit;
} network;

// network.h
Expand Down
14 changes: 7 additions & 7 deletions include/yolo_v2_class.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,7 @@ class Tracker_optflow {

if (err.rows == cur_bbox_vec.size() && status.rows == cur_bbox_vec.size())
{
for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
for (int i = 0; i < cur_bbox_vec.size(); ++i)
{
cv::Point2f cur_key_pt = cur_pts_flow.at<cv::Point2f>(0, i);
cv::Point2f prev_key_pt = prev_pts_flow.at<cv::Point2f>(0, i);
Expand Down Expand Up @@ -573,7 +573,7 @@ class preview_boxes_t {
preview_box_track_t() : track_id(0), obj_id(0), last_showed_frames_ago(frames_history), current_detection(false) {}
};
std::vector<preview_box_track_t> preview_box_track_id;
size_t const preview_box_size, bottom_offset;
int const preview_box_size, bottom_offset;
bool const one_off_detections;
public:
preview_boxes_t(size_t _preview_box_size = 100, size_t _bottom_offset = 100, bool _one_off_detections = false) :
Expand Down Expand Up @@ -876,8 +876,8 @@ class track_kalman_t
float time_wait = 0.5; // 0.5 second
if (track_id_state_id_time[state_id].track_id > -1)
{
if ((result_vec_pred[state_id].x > img_size.width) ||
(result_vec_pred[state_id].y > img_size.height))
if (((int)result_vec_pred[state_id].x > img_size.width) ||
((int)result_vec_pred[state_id].y > img_size.height))
{
track_id_state_id_time[state_id].track_id = -1;
}
Expand All @@ -897,7 +897,7 @@ class track_kalman_t

float min_dist = std::numeric_limits<float>::max();

for (size_t i = 0; i < max_objects; ++i)
for (int i = 0; i < max_objects; ++i)
{
if (track_id_state_id_time[i].track_id > -1 && result_vec_pred[i].obj_id == find_box.obj_id && busy_vec[i] == false)
{
Expand Down Expand Up @@ -987,7 +987,7 @@ class track_kalman_t
clear_old_states();
std::vector<bbox_t> result_vec;

for (size_t i = 0; i < max_objects; ++i)
for (int i = 0; i < max_objects; ++i)
{
tst_t tst = track_id_state_id_time[i];
if (tst.track_id > -1) {
Expand Down Expand Up @@ -1022,7 +1022,7 @@ class track_kalman_t
calc_dt();
clear_old_states();

for (size_t i = 0; i < max_objects; ++i)
for (int i = 0; i < max_objects; ++i)
track_id_state_id_time[i].detection_count--;

std::vector<tst_t> tst_vec = find_state_ids(result_vec);
Expand Down
24 changes: 23 additions & 1 deletion src/activations.c
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,15 @@
char *get_activation_string(ACTIVATION a)
{
switch(a){
case RELU6:
case SWISH:
case HARD_MISH:
case NORM_CHAN:
case NORM_CHAN_SOFTMAX:
case NORM_CHAN_SOFTMAX_MAXVAL:
case MISH:
assert(0);

case LOGISTIC:
return "logistic";
case LOGGY:
Expand Down Expand Up @@ -77,6 +86,14 @@ ACTIVATION get_activation(char *s)
float activate(float x, ACTIVATION a)
{
switch(a){
case RELU6:
case SWISH:
case HARD_MISH:
case NORM_CHAN:
case NORM_CHAN_SOFTMAX:
case NORM_CHAN_SOFTMAX_MAXVAL:
case MISH:
assert(0);
case LINEAR:
return linear_activate(x);
case LOGISTIC:
Expand Down Expand Up @@ -308,6 +325,10 @@ void gradient_array_normalize_channels(float *x, const int n, int batch, int cha
float gradient(float x, ACTIVATION a)
{
switch(a){
case SWISH:
case HARD_MISH:
case MISH:
assert(0);
case LINEAR:
return linear_gradient(x);
case LOGISTIC:
Expand All @@ -321,9 +342,10 @@ float gradient(float x, ACTIVATION a)
case NORM_CHAN:
//return relu_gradient(x);
case NORM_CHAN_SOFTMAX_MAXVAL:
//...
// fallthrough
case NORM_CHAN_SOFTMAX:
error("Error: should be used custom NORM_CHAN or NORM_CHAN_SOFTMAX-function for gradient", DARKNET_LOC);
break;
case ELU:
return elu_gradient(x);
case SELU:
Expand Down
62 changes: 31 additions & 31 deletions src/blas.c
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int
int add_outputs = outputs_of_layers[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
// int out_index = id;

float *layer_delta = layers_delta[i];
if (weights) {
Expand Down Expand Up @@ -506,7 +506,7 @@ void constrain_cpu(int size, float ALPHA, float *X)
}
}

void fix_nan_and_inf_cpu(float *input, size_t size)
void fix_nan_and_inf_cpu(float *input, int size)
{
int i;
for (i = 0; i < size; ++i) {
Expand All @@ -530,7 +530,7 @@ void get_embedding(float *src, int src_w, int src_h, int src_c, int embedding_si


// Euclidean_norm
float math_vector_length(float *A, unsigned int feature_size)
float math_vector_length(float *A, int feature_size)
{
float sum = 0;
int i;
Expand All @@ -542,7 +542,7 @@ float math_vector_length(float *A, unsigned int feature_size)
return vector_length;
}

float cosine_similarity(float *A, float *B, unsigned int feature_size)
float cosine_similarity(float *A, float *B, int feature_size)
{
float mul = 0.0, d_a = 0.0, d_b = 0.0;

Expand All @@ -561,9 +561,9 @@ float cosine_similarity(float *A, float *B, unsigned int feature_size)
return similarity;
}

int get_sim_P_index(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
int get_sim_P_index(int i, int j, contrastive_params *contrast_p, int contrast_p_size)
{
size_t z;
int z;
for (z = 0; z < contrast_p_size; ++z) {
if (contrast_p[z].i == i && contrast_p[z].j == j) break;
}
Expand All @@ -574,9 +574,9 @@ int get_sim_P_index(size_t i, size_t j, contrastive_params *contrast_p, int cont
return z; // found
}

int check_sim(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
int check_sim(int i, int j, contrastive_params *contrast_p, int contrast_p_size)
{
size_t z;
int z;
for (z = 0; z < contrast_p_size; ++z) {
if (contrast_p[z].i == i && contrast_p[z].j == j) break;
}
Expand All @@ -587,40 +587,40 @@ int check_sim(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p
return 1; // found
}

float find_sim(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
float find_sim(int i, int j, contrastive_params *contrast_p, int contrast_p_size)
{
size_t z;
int z;
for (z = 0; z < contrast_p_size; ++z) {
if (contrast_p[z].i == i && contrast_p[z].j == j) break;
}
if (z == contrast_p_size) {
printf(" Error: find_sim(): sim isn't found: i = %zu, j = %zu, z = %zu \n", i, j, z);
printf(" Error: find_sim(): sim isn't found: i = %d, j = %d, z = %d \n", i, j, z);
error("Error!", DARKNET_LOC);
}

return contrast_p[z].sim;
}

float find_P_constrastive(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
float find_P_constrastive(int i, int j, contrastive_params *contrast_p, int contrast_p_size)
{
size_t z;
int z;
for (z = 0; z < contrast_p_size; ++z) {
if (contrast_p[z].i == i && contrast_p[z].j == j) break;
}
if (z == contrast_p_size) {
printf(" Error: find_P_constrastive(): P isn't found: i = %zu, j = %zu, z = %zu \n", i, j, z);
printf(" Error: find_P_constrastive(): P isn't found: i = %d, j = %d, z = %d \n", i, j, z);
error("Error!", DARKNET_LOC);
}

return contrast_p[z].P;
}

// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive_f_det(size_t il, int *labels, float **z, unsigned int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
float P_constrastive_f_det(int il, int *labels, float **z, int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
{
const float sim = contrast_p[il].sim;
const size_t i = contrast_p[il].i;
const size_t j = contrast_p[il].j;
const int i = contrast_p[il].i;
const int j = contrast_p[il].j;

const float numerator = expf(sim / temperature);

Expand All @@ -645,10 +645,10 @@ float P_constrastive_f_det(size_t il, int *labels, float **z, unsigned int featu
}

// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive_f(size_t i, size_t l, int *labels, float **z, unsigned int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
float P_constrastive_f(int i, int l, int *labels, float **z, int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
{
if (i == l) {
fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %zu, l = %zu \n", i, l);
fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %d, l = %d \n", i, l);
error("Error!", DARKNET_LOC);
}

Expand All @@ -675,10 +675,10 @@ float P_constrastive_f(size_t i, size_t l, int *labels, float **z, unsigned int
return result;
}

void grad_contrastive_loss_positive_f(size_t i, int *class_ids, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size)
void grad_contrastive_loss_positive_f(int i, int *class_ids, int *labels, int num_of_samples, float **z, int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
int j;
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j] && labels[i] >= 0) N++;
Expand Down Expand Up @@ -720,10 +720,10 @@ void grad_contrastive_loss_positive_f(size_t i, int *class_ids, int *labels, siz
}
}

void grad_contrastive_loss_negative_f(size_t i, int *class_ids, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size, int neg_max)
void grad_contrastive_loss_negative_f(int i, int *class_ids, int *labels, int num_of_samples, float **z, int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size, int neg_max)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
int j;
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j] && labels[i] >= 0) N++;
Expand All @@ -741,7 +741,7 @@ void grad_contrastive_loss_negative_f(size_t i, int *class_ids, int *labels, siz
//if (i != j && (i/2) == (j/2)) {
if (labels[i] >= 0 && labels[i] == labels[j] && i != j) {

size_t k;
int k;
for (k = 0; k < num_of_samples; ++k) {
//if (k != i && k != j && labels[k] != labels[i]) {
if (k != i && k != j && labels[k] != labels[i] && class_ids[j] == class_ids[k]) {
Expand Down Expand Up @@ -777,10 +777,10 @@ void grad_contrastive_loss_negative_f(size_t i, int *class_ids, int *labels, siz


// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive(size_t i, size_t l, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *exp_cos_sim)
float P_constrastive(int i, int l, int *labels, int num_of_samples, float **z, int feature_size, float temperature, float *cos_sim, float *exp_cos_sim)
{
if (i == l) {
fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %zu, l = %zu \n", i, l);
fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %d, l = %d \n", i, l);
error("Error!", DARKNET_LOC);
}

Expand Down Expand Up @@ -808,10 +808,10 @@ float P_constrastive(size_t i, size_t l, int *labels, size_t num_of_samples, flo
// z[feature_size][num_of_samples] - array of arrays with contrastive features (output of conv-layer, f.e. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), f.e. temperature = 0.07: Supervised Contrastive Learning
void grad_contrastive_loss_positive(size_t i, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
void grad_contrastive_loss_positive(int i, int *labels, int num_of_samples, float **z, int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
int j;
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j]) N++;
Expand Down Expand Up @@ -848,10 +848,10 @@ void grad_contrastive_loss_positive(size_t i, int *labels, size_t num_of_samples
// z[feature_size][num_of_samples] - array of arrays with contrastive features (output of conv-layer, f.e. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), f.e. temperature = 0.07: Supervised Contrastive Learning
void grad_contrastive_loss_negative(size_t i, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
void grad_contrastive_loss_negative(int i, int *labels, int num_of_samples, float **z, int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
int j;
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j]) N++;
Expand All @@ -866,7 +866,7 @@ void grad_contrastive_loss_negative(size_t i, int *labels, size_t num_of_samples
//if (i != j && (i/2) == (j/2)) {
if (i != j && labels[i] == labels[j]) {

size_t k;
int k;
for (k = 0; k < num_of_samples; ++k) {
//if (k != i && k != j && labels[k] != labels[i]) {
if (k != i && k != j && labels[k] >= 0) {
Expand Down
Loading
Loading