diff --git a/.gitignore b/.gitignore
index bea19ff44fc..eb15c1352cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,8 @@ convnet/
decaf/
submission/
cfg/
+weights/
+build/
darknet
.fuse*
diff --git a/.idea/darknet.iml b/.idea/darknet.iml
new file mode 100644
index 00000000000..f08604bb65b
--- /dev/null
+++ b/.idea/darknet.iml
@@ -0,0 +1,2 @@
[.idea/darknet.iml: IntelliJ IDEA module file; XML markup lost in extraction.]
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 00000000000..8822db8f1c2
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
[.idea/misc.xml: IntelliJ IDEA project settings; XML markup lost in extraction.]
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 00000000000..33597b7b3ce
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
[.idea/modules.xml: IntelliJ IDEA module list; XML markup lost in extraction.]
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 00000000000..94a25f7f4cb
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
[.idea/vcs.xml: IntelliJ IDEA version-control mapping; XML markup lost in extraction.]
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 00000000000..27c0090c8e6
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,644 @@
[.idea/workspace.xml: IntelliJ IDEA workspace state; XML markup lost in extraction.
 Recoverable fragments: recent search/usage entries (gpu, main(, &net, run_yolo,
 load_network, char *filename, struct dn_, const char *filename), source root
 $PROJECT_DIR$/src, task timestamps 1551799986641, and a line breakpoint at
 file://$PROJECT_DIR$/src/gemm.c line 93.]
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 00000000000..153eb842201
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.5)
+project(darknet C)
+
+set(GPU 1)
+set(CUDNN 0)
+set(OPENCV 0)
+set(OPENMP 0)
+set(DEBUG 1)
+
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_BINARY_DIR ${CMAKE_CURRENT_SOURCE_DIR}/build)
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+include_directories(src include)
+
+set (OPTS -Ofast)
+
+if ( DEBUG )
+    set(OPTS -O0 -g )
+endif()
+
+set ( NVCC nvcc )
+set ( AR ar )
+set ( ARFLAGS rcs )
+set ( LDFLAGS -lm -pthread )
+set ( CFLAGS ${OPTS} -Wall -Wno-unused-result -Wno-unknown-pragmas -Wfatal-errors -fPIC)
+
+if ( OPENMP )
+ set(CFLAGS ${CFLAGS} -fopenmp )
+endif()
+
+if ( OPENCV )
+ set(COMMON ${COMMON} -DOPENCV )
+ set(CFLAGS ${CFLAGS} -DOPENCV )
+    set(LDFLAGS ${LDFLAGS} -L$ENV{OPENCV_HOME}/lib -lopencv_core -lstdc++)
+endif()
+
+if ( GPU )
+ set(COMMON ${COMMON} -DGPU -I/usr/local/cuda/include/ )
+ set(CFLAGS ${CFLAGS} -DGPU )
+ set(LDFLAGS ${LDFLAGS} -lstdc++ -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand)
+endif()
+
+if ( CUDNN )
+ set(COMMON ${COMMON} -DCUDNN )
+ set(CFLAGS ${CFLAGS} -DCUDNN )
+ set(LDFLAGS ${LDFLAGS} -lcudnn)
+endif()
+
+add_subdirectory(src)
+add_subdirectory(examples)
+
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 63e15e657fa..00000000000
--- a/Makefile
+++ /dev/null
@@ -1,105 +0,0 @@
-GPU=0
-CUDNN=0
-OPENCV=0
-OPENMP=0
-DEBUG=0
-
-ARCH= -gencode arch=compute_30,code=sm_30 \
- -gencode arch=compute_35,code=sm_35 \
- -gencode arch=compute_50,code=[sm_50,compute_50] \
- -gencode arch=compute_52,code=[sm_52,compute_52]
-# -gencode arch=compute_20,code=[sm_20,sm_21] \ This one is deprecated?
-
-# This is what I use, uncomment if you know your arch and want to specify
-# ARCH= -gencode arch=compute_52,code=compute_52
-
-VPATH=./src/:./examples
-SLIB=libdarknet.so
-ALIB=libdarknet.a
-EXEC=darknet
-OBJDIR=./obj/
-
-CC=gcc
-CPP=g++
-NVCC=nvcc
-AR=ar
-ARFLAGS=rcs
-OPTS=-Ofast
-LDFLAGS= -lm -pthread
-COMMON= -Iinclude/ -Isrc/
-CFLAGS=-Wall -Wno-unused-result -Wno-unknown-pragmas -Wfatal-errors -fPIC
-
-ifeq ($(OPENMP), 1)
-CFLAGS+= -fopenmp
-endif
-
-ifeq ($(DEBUG), 1)
-OPTS=-O0 -g
-endif
-
-CFLAGS+=$(OPTS)
-
-ifeq ($(OPENCV), 1)
-COMMON+= -DOPENCV
-CFLAGS+= -DOPENCV
-LDFLAGS+= `pkg-config --libs opencv` -lstdc++
-COMMON+= `pkg-config --cflags opencv`
-endif
-
-ifeq ($(GPU), 1)
-COMMON+= -DGPU -I/usr/local/cuda/include/
-CFLAGS+= -DGPU
-LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand
-endif
-
-ifeq ($(CUDNN), 1)
-COMMON+= -DCUDNN
-CFLAGS+= -DCUDNN
-LDFLAGS+= -lcudnn
-endif
-
-OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o detection_layer.o route_layer.o upsample_layer.o box.o normalization_layer.o avgpool_layer.o layer.o local_layer.o shortcut_layer.o logistic_layer.o activation_layer.o rnn_layer.o gru_layer.o crnn_layer.o demo.o batchnorm_layer.o region_layer.o reorg_layer.o tree.o lstm_layer.o l2norm_layer.o yolo_layer.o iseg_layer.o image_opencv.o
-EXECOBJA=captcha.o lsd.o super.o art.o tag.o cifar.o go.o rnn.o segmenter.o regressor.o classifier.o coco.o yolo.o detector.o nightmare.o instance-segmenter.o darknet.o
-ifeq ($(GPU), 1)
-LDFLAGS+= -lstdc++
-OBJ+=convolutional_kernels.o deconvolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o avgpool_layer_kernels.o
-endif
-
-EXECOBJ = $(addprefix $(OBJDIR), $(EXECOBJA))
-OBJS = $(addprefix $(OBJDIR), $(OBJ))
-DEPS = $(wildcard src/*.h) Makefile include/darknet.h
-
-all: obj backup results $(SLIB) $(ALIB) $(EXEC)
-#all: obj results $(SLIB) $(ALIB) $(EXEC)
-
-
-$(EXEC): $(EXECOBJ) $(ALIB)
- $(CC) $(COMMON) $(CFLAGS) $^ -o $@ $(LDFLAGS) $(ALIB)
-
-$(ALIB): $(OBJS)
- $(AR) $(ARFLAGS) $@ $^
-
-$(SLIB): $(OBJS)
- $(CC) $(CFLAGS) -shared $^ -o $@ $(LDFLAGS)
-
-$(OBJDIR)%.o: %.cpp $(DEPS)
- $(CPP) $(COMMON) $(CFLAGS) -c $< -o $@
-
-$(OBJDIR)%.o: %.c $(DEPS)
- $(CC) $(COMMON) $(CFLAGS) -c $< -o $@
-
-$(OBJDIR)%.o: %.cu $(DEPS)
- $(NVCC) $(ARCH) $(COMMON) --compiler-options "$(CFLAGS)" -c $< -o $@
-
-obj:
- mkdir -p obj
-backup:
- mkdir -p backup
-results:
- mkdir -p results
-
-.PHONY: clean
-
-clean:
- rm -rf $(OBJS) $(SLIB) $(ALIB) $(EXEC) $(EXECOBJ) $(OBJDIR)/*
-
diff --git a/README.md b/README.md
index 09fdeeeb5e7..e47a09ae218 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+This fork exists to support a YOLO implementation on an FPGA.
+
+Contact: Oscar Kramer, oscar.kramer@radiantsolutions.com, 321-821-1150
+
![Darknet Logo](http://pjreddie.com/media/files/darknet-black-small.png)
# Darknet #
diff --git a/darknet.info.yml b/darknet.info.yml
new file mode 100644
index 00000000000..52dd20a44dd
--- /dev/null
+++ b/darknet.info.yml
@@ -0,0 +1,7 @@
+data_classification:
+ -
+poc:
+ - name: Jane Smith
+ email: jane.smith@example.com
+ - name: Tasmania
+ email: Tasmania_team@example.com
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
new file mode 100644
index 00000000000..53e15b3005f
--- /dev/null
+++ b/examples/CMakeLists.txt
@@ -0,0 +1,6 @@
+message("In ${CMAKE_CURRENT_SOURCE_DIR}")
+FILE(GLOB EXAMPLE_SOURCES "*.c")
+add_executable(darknet ${EXAMPLE_SOURCES} )
+SET_TARGET_PROPERTIES(darknet PROPERTIES LINKER_LANGUAGE C)
+TARGET_LINK_LIBRARIES(darknet DarkNet pthread m)
+
diff --git a/examples/attention.c b/examples/attention.c
index cd1e579d375..e3c5eec5773 100644
--- a/examples/attention.c
+++ b/examples/attention.c
@@ -3,7 +3,7 @@
#include
#include
-void extend_data_truth(data *d, int n, float val)
+void extend_data_truth(dn_data *d, int n, float val)
{
int i, j;
for(i = 0; i < d->y.rows; ++i){
@@ -15,11 +15,11 @@ void extend_data_truth(data *d, int n, float val)
d->y.cols += n;
}
-matrix network_loss_data(network *net, data test)
+dn_matrix network_loss_data(dn_network *net, dn_data test)
{
int i,b;
int k = 1;
- matrix pred = make_matrix(test.X.rows, k);
+ dn_matrix pred = make_matrix(test.X.rows, k);
float *X = calloc(net->batch*test.X.cols, sizeof(float));
float *y = calloc(net->batch*test.y.cols, sizeof(float));
for(i = 0; i < test.X.rows; i += net->batch){
@@ -29,7 +29,7 @@ matrix network_loss_data(network *net, data test)
memcpy(y+b*test.y.cols, test.y.vals[i+b], test.y.cols*sizeof(float));
}
- network orig = *net;
+ dn_network orig = *net;
net->input = X;
net->truth = y;
net->train = 0;
@@ -60,7 +60,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -73,12 +73,12 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
+ dn_network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *label_list = option_find_str(options, "labels", "data/labels.list");
@@ -86,7 +86,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
int classes = option_find_int(options, "classes", 2);
char **labels = get_labels(label_list);
- list *plist = get_paths(train_list);
+ dn_list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
@@ -95,7 +95,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
int divs=3;
int size=2;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = divs*net->w/size;
args.h = divs*net->h/size;
args.size = divs*net->w/size;
@@ -117,8 +117,8 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
args.labels = labels;
args.type = CLASSIFICATION_DATA;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
@@ -130,9 +130,9 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
- data resized = resize_data(train, net->w, net->h);
+ dn_data resized = resize_data(train, net->w, net->h);
extend_data_truth(&resized, divs*divs, 0);
- data *tiles = tile_data(train, divs, size);
+ dn_data *tiles = tile_data(train, divs, size);
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time = what_time_is_it_now();
@@ -145,7 +145,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
for(j = 0; j < ngpus; ++j){
int index = i*ngpus + j;
extend_data_truth(tiles+index, divs*divs, SECRET_NUM);
- matrix deltas = network_loss_data(nets[j], tiles[index]);
+ dn_matrix deltas = network_loss_data(nets[j], tiles[index]);
for(z = 0; z < resized.y.rows; ++z){
resized.y.vals[z][train.y.cols + index] = deltas.vals[z][0];
}
@@ -160,7 +160,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
resized.y.vals[z][train.y.cols + i] = (i == index)? 1 : 0;
}
}
- data best = select_data(tiles, inds);
+ dn_data best = select_data(tiles, inds);
free(inds);
#ifdef GPU
if (ngpus == 1) {
@@ -176,7 +176,7 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
}
free_data(best);
printf("\n");
- image im = float_to_image(64,64,3,resized.X.vals[0]);
+ dn_image im = float_to_image(64,64,3,resized.X.vals[0]);
//show_image(im, "orig");
//cvWaitKey(100);
/*
@@ -233,11 +233,11 @@ void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
void validate_attention_single(char *datacfg, char *filename, char *weightfile)
{
int i, j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *leaf_list = option_find_str(options, "leaves", 0);
@@ -247,7 +247,7 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -271,10 +271,10 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
break;
}
}
- image im = load_image_color(paths[i], 0, 0);
- image resized = resize_min(im, net->w*divs/size);
- image crop = crop_image(resized, (resized.w - net->w*divs/size)/2, (resized.h - net->h*divs/size)/2, net->w*divs/size, net->h*divs/size);
- image rcrop = resize_image(crop, net->w, net->h);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image resized = resize_min(im, net->w*divs/size);
+ dn_image crop = crop_image(resized, (resized.w - net->w*divs/size)/2, (resized.h - net->h*divs/size)/2, net->w*divs/size, net->h*divs/size);
+ dn_image rcrop = resize_image(crop, net->w, net->h);
//show_image(im, "orig");
//show_image(crop, "cropped");
//cvWaitKey(0);
@@ -287,7 +287,7 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
printf("\n");
copy_cpu(classes, pred, 1, avgs, 1);
top_k(pred + classes, divs*divs, divs*divs, inds);
- show_image(crop, "crop");
+ show_image(crop, "crop", 0);
for(j = 0; j < extra; ++j){
int index = inds[j];
int row = index / divs;
@@ -295,10 +295,10 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
int y = row * crop.h / divs - (net->h - crop.h/divs)/2;
int x = col * crop.w / divs - (net->w - crop.w/divs)/2;
printf("%d %d %d %d\n", row, col, y, x);
- image tile = crop_image(crop, x, y, net->w, net->h);
+ dn_image tile = crop_image(crop, x, y, net->w, net->h);
float *pred = network_predict(net, tile.data);
axpy_cpu(classes, 1., pred, 1, avgs, 1);
- show_image(tile, "tile");
+ show_image(tile, "tile", 0);
//cvWaitKey(10);
}
if(net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
@@ -321,11 +321,11 @@ void validate_attention_single(char *datacfg, char *filename, char *weightfile)
void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
{
int i, j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
@@ -333,7 +333,7 @@ void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
int scales[] = {224, 288, 320, 352, 384};
int nscales = sizeof(scales)/sizeof(scales[0]);
@@ -355,9 +355,9 @@ void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
}
}
float *pred = calloc(classes, sizeof(float));
- image im = load_image_color(paths[i], 0, 0);
+ dn_image im = load_image_color(paths[i], 0, 0);
for(j = 0; j < nscales; ++j){
- image r = resize_min(im, scales[j]);
+ dn_image r = resize_min(im, scales[j]);
resize_network(net, r.w, r.h);
float *p = network_predict(net, r.data);
if(net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1 , 1);
@@ -381,11 +381,11 @@ void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
void predict_attention(char *datacfg, char *cfgfile, char *weightfile, char *filename, int top)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
@@ -407,8 +407,8 @@ void predict_attention(char *datacfg, char *cfgfile, char *weightfile, char *fil
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image r = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image r = letterbox_image(im, net->w, net->h);
//resize_network(&net, r.w, r.h);
//printf("%d %d\n", r.w, r.h);
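
Note on the type renames in this and the following example files: the patch switches
Darknet's original public type names (network, layer, image, data, matrix, list, box,
box_label, load_args) to dn_-prefixed equivalents, presumably to avoid collisions with
common identifiers when the library headers are consumed by other code. The
include/darknet.h change that introduces the prefixed names is not part of this section,
so the sketch below is only a hypothetical illustration of the forward declarations these
examples now assume.

    /* Hypothetical sketch only -- include/darknet.h is not shown in this patch. */
    typedef struct dn_network   dn_network;    /* was: network   */
    typedef struct dn_layer     dn_layer;      /* was: layer     */
    typedef struct dn_image     dn_image;      /* was: image     */
    typedef struct dn_data      dn_data;       /* was: data      */
    typedef struct dn_matrix    dn_matrix;     /* was: matrix    */
    typedef struct dn_list      dn_list;       /* was: list      */
    typedef struct dn_box       dn_box;        /* was: box       */
    typedef struct dn_box_label dn_box_label;  /* was: box_label */
    typedef struct dn_load_args dn_load_args;  /* was: load_args */

The same hunks also move show_image() from a two-argument to a three-argument call,
passing an extra wait-time argument of 0.
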
diff --git a/examples/captcha.c b/examples/captcha.c
index 41d6d07c308..3d4be72480a 100644
--- a/examples/captcha.c
+++ b/examples/captcha.c
@@ -1,8 +1,8 @@
#include "darknet.h"
-void fix_data_captcha(data d, int mask)
+void fix_data_captcha(dn_data d, int mask)
{
- matrix labels = d.y;
+ dn_matrix labels = d.y;
int i, j;
for(i = 0; i < d.y.rows; ++i){
for(j = 0; j < d.y.cols; j += 2){
@@ -30,12 +30,12 @@ void train_captcha(char *cfgfile, char *weightfile)
float avg_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
int imgs = 1024;
int i = *net->seen/imgs;
int solved = 1;
- list *plist;
+ dn_list *plist;
char **labels = get_labels("/data/captcha/reimgs.labels.list");
if (solved){
plist = get_paths("/data/captcha/reimgs.solved.list");
@@ -46,10 +46,10 @@ void train_captcha(char *cfgfile, char *weightfile)
printf("%d\n", plist->size);
clock_t time;
pthread_t load_thread;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.paths = paths;
@@ -92,7 +92,7 @@ void train_captcha(char *cfgfile, char *weightfile)
void test_captcha(char *cfgfile, char *weightfile, char *filename)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
int i = 0;
@@ -110,7 +110,7 @@ void test_captcha(char *cfgfile, char *weightfile, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, net->w, net->h);
+ dn_image im = load_image_color(input, net->w, net->h);
float *X = im.data;
float *predictions = network_predict(net, X);
top_predictions(net, 26, indexes);
@@ -130,8 +130,8 @@ void test_captcha(char *cfgfile, char *weightfile, char *filename)
void valid_captcha(char *cfgfile, char *weightfile, char *filename)
{
char **labels = get_labels("/data/captcha/reimgs.labels.list");
- network *net = load_network(cfgfile, weightfile, 0);
- list *plist = get_paths("/data/captcha/reimgs.fg.list");
+ dn_network *net = load_network(cfgfile, weightfile, 0);
+ dn_list *plist = get_paths("/data/captcha/reimgs.fg.list");
char **paths = (char **)list_to_array(plist);
int N = plist->size;
int outputs = net->outputs;
@@ -141,7 +141,7 @@ void valid_captcha(char *cfgfile, char *weightfile, char *filename)
int i, j;
for(i = 0; i < N; ++i){
if (i%100 == 0) fprintf(stderr, "%d\n", i);
- image im = load_image_color(paths[i], net->w, net->h);
+ dn_image im = load_image_color(paths[i], net->w, net->h);
float *X = im.data;
float *predictions = network_predict(net, X);
//printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
diff --git a/examples/cifar.c b/examples/cifar.c
index a5f5f240b9f..e59511171b8 100644
--- a/examples/cifar.c
+++ b/examples/cifar.c
@@ -6,7 +6,7 @@ void train_cifar(char *cfgfile, char *weightfile)
float avg_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
char *backup_directory = "/home/pjreddie/backup/";
@@ -15,7 +15,7 @@ void train_cifar(char *cfgfile, char *weightfile)
char **labels = get_labels("data/cifar/labels.txt");
int epoch = (*net->seen)/N;
- data train = load_all_cifar10();
+ dn_data train = load_all_cifar10();
while(get_current_batch(net) < net->max_batches || net->max_batches == 0){
clock_t time=clock();
@@ -51,7 +51,7 @@ void train_cifar_distill(char *cfgfile, char *weightfile)
float avg_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
char *backup_directory = "/home/pjreddie/backup/";
@@ -61,8 +61,8 @@ void train_cifar_distill(char *cfgfile, char *weightfile)
char **labels = get_labels("data/cifar/labels.txt");
int epoch = (*net->seen)/N;
- data train = load_all_cifar10();
- matrix soft = csv_to_matrix("results/ensemble.csv");
+ dn_data train = load_all_cifar10();
+ dn_matrix soft = csv_to_matrix("results/ensemble.csv");
float weight = .9;
scale_matrix(soft, weight);
@@ -100,16 +100,16 @@ void train_cifar_distill(char *cfgfile, char *weightfile)
void test_cifar_multi(char *filename, char *weightfile)
{
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
float avg_acc = 0;
- data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+ dn_data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
int i;
for(i = 0; i < test.X.rows; ++i){
- image im = float_to_image(32, 32, 3, test.X.vals[i]);
+ dn_image im = float_to_image(32, 32, 3, test.X.vals[i]);
float pred[10] = {0};
@@ -129,13 +129,13 @@ void test_cifar_multi(char *filename, char *weightfile)
void test_cifar(char *filename, char *weightfile)
{
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
srand(time(0));
clock_t time;
float avg_acc = 0;
float avg_top5 = 0;
- data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+ dn_data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
time=clock();
@@ -150,17 +150,17 @@ void extract_cifar()
{
char *labels[] = {"airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"};
int i;
- data train = load_all_cifar10();
- data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+ dn_data train = load_all_cifar10();
+ dn_data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
for(i = 0; i < train.X.rows; ++i){
- image im = float_to_image(32, 32, 3, train.X.vals[i]);
+ dn_image im = float_to_image(32, 32, 3, train.X.vals[i]);
int class = max_index(train.y.vals[i], 10);
char buff[256];
sprintf(buff, "data/cifar/train/%d_%s",i,labels[class]);
save_image_options(im, buff, PNG, 0);
}
for(i = 0; i < test.X.rows; ++i){
- image im = float_to_image(32, 32, 3, test.X.vals[i]);
+ dn_image im = float_to_image(32, 32, 3, test.X.vals[i]);
int class = max_index(test.y.vals[i], 10);
char buff[256];
sprintf(buff, "data/cifar/test/%d_%s",i,labels[class]);
@@ -170,19 +170,19 @@ char *labels[] = {"airplane","automobile","bird","cat","deer","dog","frog","hors
void test_cifar_csv(char *filename, char *weightfile)
{
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
srand(time(0));
- data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+ dn_data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
- matrix pred = network_predict_data(net, test);
+ dn_matrix pred = network_predict_data(net, test);
int i;
for(i = 0; i < test.X.rows; ++i){
- image im = float_to_image(32, 32, 3, test.X.vals[i]);
+ dn_image im = float_to_image(32, 32, 3, test.X.vals[i]);
flip_image(im);
}
- matrix pred2 = network_predict_data(net, test);
+ dn_matrix pred2 = network_predict_data(net, test);
scale_matrix(pred, .5);
scale_matrix(pred2, .5);
matrix_add_matrix(pred2, pred);
@@ -194,19 +194,19 @@ void test_cifar_csv(char *filename, char *weightfile)
void test_cifar_csvtrain(char *cfg, char *weights)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
srand(time(0));
- data test = load_all_cifar10();
+ dn_data test = load_all_cifar10();
- matrix pred = network_predict_data(net, test);
+ dn_matrix pred = network_predict_data(net, test);
int i;
for(i = 0; i < test.X.rows; ++i){
- image im = float_to_image(32, 32, 3, test.X.vals[i]);
+ dn_image im = float_to_image(32, 32, 3, test.X.vals[i]);
flip_image(im);
}
- matrix pred2 = network_predict_data(net, test);
+ dn_matrix pred2 = network_predict_data(net, test);
scale_matrix(pred, .5);
scale_matrix(pred2, .5);
matrix_add_matrix(pred2, pred);
@@ -218,9 +218,9 @@ void test_cifar_csvtrain(char *cfg, char *weights)
void eval_cifar_csv()
{
- data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+ dn_data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
- matrix pred = csv_to_matrix("results/combined.csv");
+ dn_matrix pred = csv_to_matrix("results/combined.csv");
fprintf(stderr, "%d %d\n", pred.rows, pred.cols);
fprintf(stderr, "Accuracy: %f\n", matrix_topk_accuracy(test.y, pred, 1));
diff --git a/examples/classifier.c b/examples/classifier.c
index df91a084804..e495feb6103 100644
--- a/examples/classifier.c
+++ b/examples/classifier.c
@@ -23,7 +23,7 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -36,12 +36,12 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
+ dn_network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
int tag = option_find_int_quiet(options, "tag", 0);
@@ -55,13 +55,13 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
if(!tag){
labels = get_labels(label_list);
}
- list *plist = get_paths(train_list);
+ dn_list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
double time;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.threads = 32;
@@ -88,8 +88,8 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
args.type = CLASSIFICATION_DATA;
}
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
@@ -170,10 +170,10 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
void validate_classifier_crop(char *datacfg, char *filename, char *weightfile)
{
int i = 0;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
@@ -181,7 +181,7 @@ void validate_classifier_crop(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -193,9 +193,9 @@ void validate_classifier_crop(char *datacfg, char *filename, char *weightfile)
int splits = m/1000;
int num = (i+1)*m/splits - i*m/splits;
- data val, buffer;
+ dn_data val, buffer;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
@@ -234,11 +234,11 @@ void validate_classifier_crop(char *datacfg, char *filename, char *weightfile)
void validate_classifier_10(char *datacfg, char *filename, char *weightfile)
{
int i, j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
@@ -246,7 +246,7 @@ void validate_classifier_10(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -268,8 +268,8 @@ void validate_classifier_10(char *datacfg, char *filename, char *weightfile)
int w = net->w;
int h = net->h;
int shift = 32;
- image im = load_image_color(paths[i], w+shift, h+shift);
- image images[10];
+ dn_image im = load_image_color(paths[i], w+shift, h+shift);
+ dn_image images[10];
images[0] = crop_image(im, -shift, -shift, w, h);
images[1] = crop_image(im, shift, -shift, w, h);
images[2] = crop_image(im, 0, 0, w, h);
@@ -303,11 +303,11 @@ void validate_classifier_10(char *datacfg, char *filename, char *weightfile)
void validate_classifier_full(char *datacfg, char *filename, char *weightfile)
{
int i, j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
@@ -315,7 +315,7 @@ void validate_classifier_full(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -335,8 +335,8 @@ void validate_classifier_full(char *datacfg, char *filename, char *weightfile)
break;
}
}
- image im = load_image_color(paths[i], 0, 0);
- image resized = resize_min(im, size);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image resized = resize_min(im, size);
resize_network(net, resized.w, resized.h);
//show_image(im, "orig");
//show_image(crop, "cropped");
@@ -361,11 +361,11 @@ void validate_classifier_full(char *datacfg, char *filename, char *weightfile)
void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
{
int i, j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *leaf_list = option_find_str(options, "leaves", 0);
@@ -375,7 +375,7 @@ void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -394,8 +394,8 @@ void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
break;
}
}
- image im = load_image_color(paths[i], 0, 0);
- image crop = center_crop_image(im, net->w, net->h);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image crop = center_crop_image(im, net->w, net->h);
//grayscale_image_3c(crop);
//show_image(im, "orig");
//show_image(crop, "cropped");
@@ -420,11 +420,11 @@ void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
void validate_classifier_multi(char *datacfg, char *cfg, char *weights)
{
int i, j;
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
@@ -432,7 +432,7 @@ void validate_classifier_multi(char *datacfg, char *cfg, char *weights)
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
- list *plist = get_paths(valid_list);
+ dn_list *plist = get_paths(valid_list);
//int scales[] = {224, 288, 320, 352, 384};
int scales[] = {224, 256, 288, 320};
int nscales = sizeof(scales)/sizeof(scales[0]);
@@ -455,9 +455,9 @@ void validate_classifier_multi(char *datacfg, char *cfg, char *weights)
}
}
float *pred = calloc(classes, sizeof(float));
- image im = load_image_color(paths[i], 0, 0);
+ dn_image im = load_image_color(paths[i], 0, 0);
for(j = 0; j < nscales; ++j){
- image r = resize_max(im, scales[j]);
+ dn_image r = resize_max(im, scales[j]);
resize_network(net, r.w, r.h);
float *p = network_predict(net, r.data);
if(net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1 , 1);
@@ -481,11 +481,11 @@ void validate_classifier_multi(char *datacfg, char *cfg, char *weights)
void try_classifier(char *datacfg, char *cfgfile, char *weightfile, char *filename, int layer_num)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
@@ -507,9 +507,9 @@ void try_classifier(char *datacfg, char *cfgfile, char *weightfile, char *filena
if(!input) return;
strtok(input, "\n");
}
- image orig = load_image_color(input, 0, 0);
- image r = resize_min(orig, 256);
- image im = crop_image(r, (r.w - 224 - 1)/2 + 1, (r.h - 224 - 1)/2 + 1, 224, 224);
+ dn_image orig = load_image_color(input, 0, 0);
+ dn_image r = resize_min(orig, 256);
+ dn_image im = crop_image(r, (r.w - 224 - 1)/2 + 1, (r.h - 224 - 1)/2 + 1, 224, 224);
float mean[] = {0.48263312050943, 0.45230225481413, 0.40099074308742};
float std[] = {0.22590347483426, 0.22120921437787, 0.22103996251583};
float var[3];
@@ -523,7 +523,7 @@ void try_classifier(char *datacfg, char *cfgfile, char *weightfile, char *filena
time=clock();
float *predictions = network_predict(net, X);
- layer l = net->layers[layer_num];
+ dn_layer l = net->layers[layer_num];
for(i = 0; i < l.c; ++i){
if(l.rolling_mean) printf("%f %f %f\n", l.rolling_mean[i], l.rolling_variance[i], l.scales[i]);
}
@@ -559,11 +559,11 @@ void try_classifier(char *datacfg, char *cfgfile, char *weightfile, char *filena
void predict_classifier(char *datacfg, char *cfgfile, char *weightfile, char *filename, int top)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
@@ -585,8 +585,8 @@ void predict_classifier(char *datacfg, char *cfgfile, char *weightfile, char *fi
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image r = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image r = letterbox_image(im, net->w, net->h);
//image r = resize_min(im, 320);
//printf("%d %d\n", r.w, r.h);
//resize_network(net, r.w, r.h);
@@ -614,27 +614,27 @@ void predict_classifier(char *datacfg, char *cfgfile, char *weightfile, char *fi
void label_classifier(char *datacfg, char *filename, char *weightfile)
{
int i;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "names", "data/labels.list");
char *test_list = option_find_str(options, "test", "data/train.list");
int classes = option_find_int(options, "classes", 2);
char **labels = get_labels(label_list);
- list *plist = get_paths(test_list);
+ dn_list *plist = get_paths(test_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
for(i = 0; i < m; ++i){
- image im = load_image_color(paths[i], 0, 0);
- image resized = resize_min(im, net->w);
- image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image resized = resize_min(im, net->w);
+ dn_image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
float *pred = network_predict(net, crop.data);
if(resized.data != im.data) free_image(resized);
@@ -649,15 +649,15 @@ void label_classifier(char *datacfg, char *filename, char *weightfile)
void csv_classifier(char *datacfg, char *cfgfile, char *weightfile)
{
int i,j;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *test_list = option_find_str(options, "test", "data/test.list");
int top = option_find_int(options, "top", 1);
- list *plist = get_paths(test_list);
+ dn_list *plist = get_paths(test_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -667,8 +667,8 @@ void csv_classifier(char *datacfg, char *cfgfile, char *weightfile)
for(i = 0; i < m; ++i){
double time = what_time_is_it_now();
char *path = paths[i];
- image im = load_image_color(path, 0, 0);
- image r = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(path, 0, 0);
+ dn_image r = letterbox_image(im, net->w, net->h);
float *predictions = network_predict(net, r.data);
if(net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
top_k(predictions, net->outputs, top, indexes);
@@ -689,15 +689,15 @@ void csv_classifier(char *datacfg, char *cfgfile, char *weightfile)
void test_classifier(char *datacfg, char *cfgfile, char *weightfile, int target_layer)
{
int curr = 0;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *test_list = option_find_str(options, "test", "data/test.list");
int classes = option_find_int(options, "classes", 2);
- list *plist = get_paths(test_list);
+ dn_list *plist = get_paths(test_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
@@ -705,9 +705,9 @@ void test_classifier(char *datacfg, char *cfgfile, char *weightfile, int target_
clock_t time;
- data val, buffer;
+ dn_data val, buffer;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.paths = paths;
@@ -733,7 +733,7 @@ void test_classifier(char *datacfg, char *cfgfile, char *weightfile, int target_
fprintf(stderr, "Loaded: %d images in %lf seconds\n", val.X.rows, sec(clock()-time));
time=clock();
- matrix pred = network_predict_data(net, val);
+ dn_matrix pred = network_predict_data(net, val);
int i, j;
if (target_layer >= 0){
@@ -758,25 +758,25 @@ void test_classifier(char *datacfg, char *cfgfile, char *weightfile, int target_
void file_output_classifier(char *datacfg, char *filename, char *weightfile, char *listfile)
{
int i,j;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
//char *label_list = option_find_str(options, "names", "data/labels.list");
int classes = option_find_int(options, "classes", 2);
- list *plist = get_paths(listfile);
+ dn_list *plist = get_paths(listfile);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
for(i = 0; i < m; ++i){
- image im = load_image_color(paths[i], 0, 0);
- image resized = resize_min(im, net->w);
- image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image resized = resize_min(im, net->w);
+ dn_image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
float *pred = network_predict(net, crop.data);
if(net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 0, 1);
diff --git a/examples/coco.c b/examples/coco.c
index 6a50b89abd2..b15ae280a70 100644
--- a/examples/coco.c
+++ b/examples/coco.c
@@ -17,24 +17,24 @@ void train_coco(char *cfgfile, char *weightfile)
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
int imgs = net->batch*net->subdivisions;
int i = *net->seen/imgs;
- data train, buffer;
+ dn_data train, buffer;
- layer l = net->layers[net->n - 1];
+ dn_layer l = net->layers[net->n - 1];
int side = l.side;
int classes = l.classes;
float jitter = l.jitter;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.paths = paths;
@@ -127,18 +127,18 @@ int get_coco_image_id(char *filename)
void validate_coco(char *cfg, char *weights)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
char *base = "results/";
- list *plist = get_paths("data/coco_val_5k.list");
+ dn_list *plist = get_paths("data/coco_val_5k.list");
//list *plist = get_paths("/home/pjreddie/data/people-art/test.txt");
//list *plist = get_paths("/home/pjreddie/data/voc/test/2007_test.txt");
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
@@ -155,13 +155,13 @@ void validate_coco(char *cfg, char *weights)
float iou_thresh = .5;
int nthreads = 8;
- image *val = calloc(nthreads, sizeof(image));
- image *val_resized = calloc(nthreads, sizeof(image));
- image *buf = calloc(nthreads, sizeof(image));
- image *buf_resized = calloc(nthreads, sizeof(image));
+ dn_image *val = calloc(nthreads, sizeof(dn_image));
+ dn_image *val_resized = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf_resized = calloc(nthreads, sizeof(dn_image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.type = IMAGE_DATA;
@@ -211,16 +211,16 @@ void validate_coco(char *cfg, char *weights)
void validate_coco_recall(char *cfgfile, char *weightfile)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
char *base = "results/comp4_det_test_";
- list *plist = get_paths("/home/pjreddie/data/voc/test/2007_test.txt");
+ dn_list *plist = get_paths("/home/pjreddie/data/voc/test/2007_test.txt");
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
int side = l.side;
@@ -246,8 +246,8 @@ void validate_coco_recall(char *cfgfile, char *weightfile)
for(i = 0; i < m; ++i){
char *path = paths[i];
- image orig = load_image_color(path, 0, 0);
- image sized = resize_image(orig, net->w, net->h);
+ dn_image orig = load_image_color(path, 0, 0);
+ dn_image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
@@ -262,7 +262,7 @@ void validate_coco_recall(char *cfgfile, char *weightfile)
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
- box_label *truth = read_boxes(labelpath, &num_labels);
+ dn_box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < side*side*l.n; ++k){
if(dets[k].objectness > thresh){
++proposals;
@@ -270,7 +270,7 @@ void validate_coco_recall(char *cfgfile, char *weightfile)
}
for (j = 0; j < num_labels; ++j) {
++total;
- box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
+ dn_box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < side*side*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
@@ -293,9 +293,9 @@ void validate_coco_recall(char *cfgfile, char *weightfile)
void test_coco(char *cfgfile, char *weightfile, char *filename, float thresh)
{
- image **alphabet = load_alphabet();
- network *net = load_network(cfgfile, weightfile, 0);
- layer l = net->layers[net->n-1];
+ dn_image **alphabet = load_alphabet();
+ dn_network *net = load_network(cfgfile, weightfile, 0);
+ dn_layer l = net->layers[net->n-1];
set_batch_network(net, 1);
srand(2222222);
float nms = .4;
@@ -312,8 +312,8 @@ void test_coco(char *cfgfile, char *weightfile, char *filename, float thresh)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input,0,0);
- image sized = resize_image(im, net->w, net->h);
+ dn_image im = load_image_color(input,0,0);
+ dn_image sized = resize_image(im, net->w, net->h);
float *X = sized.data;
time=clock();
network_predict(net, X);
diff --git a/examples/darknet.c b/examples/darknet.c
index d538359203b..42b9888dddb 100644
--- a/examples/darknet.c
+++ b/examples/darknet.c
@@ -27,8 +27,8 @@ void average(int argc, char *argv[])
char *cfgfile = argv[2];
char *outfile = argv[3];
gpu_index = -1;
- network *net = parse_network_cfg(cfgfile);
- network *sum = parse_network_cfg(cfgfile);
+ dn_network *net = parse_network_cfg(cfgfile);
+ dn_network *sum = parse_network_cfg(cfgfile);
char *weightfile = argv[4];
load_weights(sum, weightfile);
@@ -39,8 +39,8 @@ void average(int argc, char *argv[])
weightfile = argv[i+5];
load_weights(net, weightfile);
for(j = 0; j < net->n; ++j){
- layer l = net->layers[j];
- layer out = sum->layers[j];
+ dn_layer l = net->layers[j];
+ dn_layer out = sum->layers[j];
if(l.type == CONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
axpy_cpu(l.n, 1, l.biases, 1, out.biases, 1);
@@ -59,7 +59,7 @@ void average(int argc, char *argv[])
}
n = n+1;
for(j = 0; j < net->n; ++j){
- layer l = sum->layers[j];
+ dn_layer l = sum->layers[j];
if(l.type == CONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
scal_cpu(l.n, 1./n, l.biases, 1);
@@ -78,12 +78,12 @@ void average(int argc, char *argv[])
save_weights(sum, outfile);
}
-long numops(network *net)
+long numops(dn_network *net)
{
int i;
long ops = 0;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
ops += 2l * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w;
} else if(l.type == CONNECTED){
@@ -116,11 +116,11 @@ long numops(network *net)
void speed(char *cfgfile, int tics)
{
if (tics == 0) tics = 1000;
- network *net = parse_network_cfg(cfgfile);
+ dn_network *net = parse_network_cfg(cfgfile);
set_batch_network(net, 1);
int i;
double time=what_time_is_it_now();
- image im = make_image(net->w, net->h, net->c*net->batch);
+ dn_image im = make_image(net->w, net->h, net->c*net->batch);
for(i = 0; i < tics; ++i){
network_predict(net, im.data);
}
@@ -136,7 +136,7 @@ void speed(char *cfgfile, int tics)
void operations(char *cfgfile)
{
gpu_index = -1;
- network *net = parse_network_cfg(cfgfile);
+ dn_network *net = parse_network_cfg(cfgfile);
long ops = numops(net);
printf("Floating Point Operations: %ld\n", ops);
printf("Floating Point Operations: %.2f Bn\n", (float)ops/1000000000.);
@@ -145,7 +145,7 @@ void operations(char *cfgfile)
void oneoff(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = parse_network_cfg(cfgfile);
+ dn_network *net = parse_network_cfg(cfgfile);
int oldn = net->layers[net->n - 2].n;
int c = net->layers[net->n - 2].c;
scal_cpu(oldn*c, .1, net->layers[net->n - 2].weights, 1);
@@ -160,7 +160,7 @@ void oneoff(char *cfgfile, char *weightfile, char *outfile)
net->layers[net->n - 2].weights -= 5*c;
net->layers[net->n - 2].n = oldn;
printf("%d\n", oldn);
- layer l = net->layers[net->n - 2];
+ dn_layer l = net->layers[net->n - 2];
copy_cpu(l.n/3, l.biases, 1, l.biases + l.n/3, 1);
copy_cpu(l.n/3, l.biases, 1, l.biases + 2*l.n/3, 1);
copy_cpu(l.n/3*l.c, l.weights, 1, l.weights + l.n/3*l.c, 1);
@@ -172,7 +172,7 @@ void oneoff(char *cfgfile, char *weightfile, char *outfile)
void oneoff2(char *cfgfile, char *weightfile, char *outfile, int l)
{
gpu_index = -1;
- network *net = parse_network_cfg(cfgfile);
+ dn_network *net = parse_network_cfg(cfgfile);
if(weightfile){
load_weights_upto(net, weightfile, 0, net->n);
load_weights_upto(net, weightfile, l, net->n);
@@ -184,15 +184,15 @@ void oneoff2(char *cfgfile, char *weightfile, char *outfile, int l)
void partial(char *cfgfile, char *weightfile, char *outfile, int max)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 1);
+ dn_network *net = load_network(cfgfile, weightfile, 1);
save_weights_upto(net, outfile, max);
}
void print_weights(char *cfgfile, char *weightfile, int n)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 1);
- layer l = net->layers[n];
+ dn_network *net = load_network(cfgfile, weightfile, 1);
+ dn_layer l = net->layers[n];
int i, j;
//printf("[");
for(i = 0; i < l.n; ++i){
@@ -210,10 +210,10 @@ void print_weights(char *cfgfile, char *weightfile, int n)
void rescale_net(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
rescale_weights(l, 2, -.5);
break;
@@ -225,10 +225,10 @@ void rescale_net(char *cfgfile, char *weightfile, char *outfile)
void rgbgr_net(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
rgbgr_weights(l);
break;
@@ -240,10 +240,10 @@ void rgbgr_net(char *cfgfile, char *weightfile, char *outfile)
void reset_normalize_net(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for (i = 0; i < net->n; ++i) {
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if (l.type == CONVOLUTIONAL && l.batch_normalize) {
denormalize_convolutional_layer(l);
}
@@ -262,7 +262,7 @@ void reset_normalize_net(char *cfgfile, char *weightfile, char *outfile)
save_weights(net, outfile);
}
-layer normalize_layer(layer l, int n)
+dn_layer normalize_layer(dn_layer l, int n)
{
int j;
l.batch_normalize=1;
@@ -278,10 +278,10 @@ layer normalize_layer(layer l, int n)
void normalize_net(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL && !l.batch_normalize){
net->layers[i] = normalize_layer(l, l.n);
}
@@ -304,10 +304,10 @@ void normalize_net(char *cfgfile, char *weightfile, char *outfile)
void statistics_net(char *cfgfile, char *weightfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for (i = 0; i < net->n; ++i) {
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if (l.type == CONNECTED && l.batch_normalize) {
printf("Connected Layer %d\n", i);
statistics_connected_layer(l);
@@ -334,10 +334,10 @@ void statistics_net(char *cfgfile, char *weightfile)
void denormalize_net(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int i;
for (i = 0; i < net->n; ++i) {
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if ((l.type == DECONVOLUTIONAL || l.type == CONVOLUTIONAL) && l.batch_normalize) {
denormalize_convolutional_layer(l);
net->layers[i].batch_normalize=0;
@@ -367,16 +367,16 @@ void denormalize_net(char *cfgfile, char *weightfile, char *outfile)
void mkimg(char *cfgfile, char *weightfile, int h, int w, int num, char *prefix)
{
- network *net = load_network(cfgfile, weightfile, 0);
- image *ims = get_weights(net->layers[0]);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
+ dn_image *ims = get_weights(net->layers[0]);
int n = net->layers[0].n;
int z;
for(z = 0; z < num; ++z){
- image im = make_image(h, w, 3);
+ dn_image im = make_image(h, w, 3);
fill_image(im, .5);
int i;
for(i = 0; i < 100; ++i){
- image r = copy_image(ims[rand()%n]);
+ dn_image r = copy_image(ims[rand()%n]);
rotate_image_cw(r, rand()%4);
random_distort_image(r, 1, 1.5, 1.5);
int dx = rand()%(w-r.w);
@@ -393,7 +393,7 @@ void mkimg(char *cfgfile, char *weightfile, int h, int w, int num, char *prefix)
void visualize(char *cfgfile, char *weightfile)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
visualize_network(net);
}
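
For orientation, here is a minimal, self-contained usage sketch of the renamed API as it
appears throughout these examples (load_network, set_batch_network, load_image_color,
letterbox_image, network_predict, free_image). It is an illustration written against the
assumed dn_-prefixed declarations discussed earlier, not code taken from this patch.

    /* Minimal classification-style driver using the dn_-prefixed API. */
    #include <stdio.h>
    #include "darknet.h"

    int main(int argc, char **argv)
    {
        if (argc < 4) {
            fprintf(stderr, "usage: %s cfg weights image\n", argv[0]);
            return 1;
        }
        dn_network *net = load_network(argv[1], argv[2], 0);   /* cfg, weights, clear=0 */
        set_batch_network(net, 1);                             /* single-image inference */

        dn_image im = load_image_color(argv[3], 0, 0);         /* load at native size */
        dn_image sized = letterbox_image(im, net->w, net->h);  /* fit the network input */
        float *predictions = network_predict(net, sized.data);
        printf("output[0] = %f\n", predictions[0]);

        free_image(sized);
        free_image(im);
        return 0;
    }
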
diff --git a/examples/detector.c b/examples/detector.c
index 318f7fbbe81..f6ddc3f6bf8 100644
--- a/examples/detector.c
+++ b/examples/detector.c
@@ -5,7 +5,7 @@ static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,2
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
@@ -13,7 +13,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network **nets = calloc(ngpus, sizeof(network));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network));
srand(time(0));
int seed = rand();
@@ -27,22 +27,22 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
+ dn_network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- data train, buffer;
+ dn_data train, buffer;
- layer l = net->layers[net->n - 1];
+ dn_layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = get_base_args(net);
+ dn_load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
@@ -234,7 +234,7 @@ void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
@@ -243,15 +243,15 @@ void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char
int *map = 0;
if (mapf) map = read_map(mapf);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
- list *plist = get_paths(valid_images);
+ dn_list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
@@ -289,15 +289,15 @@ void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char
float nms = .45;
int nthreads = 4;
- image *val = calloc(nthreads, sizeof(image));
- image *val_resized = calloc(nthreads, sizeof(image));
- image *buf = calloc(nthreads, sizeof(image));
- image *buf_resized = calloc(nthreads, sizeof(image));
+ dn_image *val = calloc(nthreads, sizeof(dn_image));
+ dn_image *val_resized = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf_resized = calloc(nthreads, sizeof(dn_image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
- image input = make_image(net->w, net->h, net->c*2);
+ dn_image input = make_image(net->w, net->h, net->c*2);
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
@@ -364,7 +364,7 @@ void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
@@ -373,15 +373,15 @@ void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *out
int *map = 0;
if (mapf) map = read_map(mapf);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
- list *plist = get_paths(valid_images);
+ dn_list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
@@ -420,13 +420,13 @@ void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *out
float nms = .45;
int nthreads = 4;
- image *val = calloc(nthreads, sizeof(image));
- image *val_resized = calloc(nthreads, sizeof(image));
- image *buf = calloc(nthreads, sizeof(image));
- image *buf_resized = calloc(nthreads, sizeof(image));
+ dn_image *val = calloc(nthreads, sizeof(dn_image));
+ dn_image *val_resized = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf_resized = calloc(nthreads, sizeof(dn_image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
@@ -488,15 +488,15 @@ void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *out
void validate_detector_recall(char *cfgfile, char *weightfile)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
- list *plist = get_paths("data/coco_val_5k.list");
+ dn_list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int j, k;
@@ -514,8 +514,8 @@ void validate_detector_recall(char *cfgfile, char *weightfile)
for(i = 0; i < m; ++i){
char *path = paths[i];
- image orig = load_image_color(path, 0, 0);
- image sized = resize_image(orig, net->w, net->h);
+ dn_image orig = load_image_color(path, 0, 0);
+ dn_image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
@@ -529,7 +529,7 @@ void validate_detector_recall(char *cfgfile, char *weightfile)
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
- box_label *truth = read_boxes(labelpath, &num_labels);
+ dn_box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
@@ -537,7 +537,7 @@ void validate_detector_recall(char *cfgfile, char *weightfile)
}
for (j = 0; j < num_labels; ++j) {
++total;
- box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
+ dn_box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
@@ -561,12 +561,12 @@ void validate_detector_recall(char *cfgfile, char *weightfile)
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
- image **alphabet = load_alphabet();
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_image **alphabet = load_alphabet();
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
@@ -583,13 +583,13 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input,0,0);
- image sized = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input,0,0);
+ dn_image sized = letterbox_image(im, net->w, net->h);
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
float *X = sized.data;
@@ -839,7 +839,7 @@ void run_detector(int argc, char **argv)
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
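
Every train/validate path in detector.c shares the same threaded data-loading idiom, now spelled with the prefixed structs. A minimal sketch of that idiom under the renamed API, assuming get_base_args, load_data and DETECTION_DATA as declared in include/darknet.h; the helper name is hypothetical, and the detector-specific fields set in the hunks above (coords, jitter, thread count) are omitted for brevity:

#include <pthread.h>
#include "darknet.h"

/* Illustrative only: fill dn_load_args the way train_detector does and
 * run one blocking load of n images taken from paths. */
static dn_data load_one_detection_batch(dn_network *net, char **paths, int n)
{
    dn_data buffer;
    dn_load_args args = get_base_args(net);  /* copies w, h, etc. from the net */
    args.paths = paths;          /* image path list */
    args.n = n;                  /* images to load this call */
    args.m = n;                  /* total images available in paths */
    args.d = &buffer;            /* load_data fills this struct */
    args.type = DETECTION_DATA;

    pthread_t load_thread = load_data(args);  /* spawns loader threads */
    pthread_join(load_thread, 0);             /* wait for the batch */
    return buffer;
}
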
diff --git a/examples/dice.c b/examples/dice.c.HIDE
similarity index 100%
rename from examples/dice.c
rename to examples/dice.c.HIDE
diff --git a/examples/go.c b/examples/go.c
index 688579dcb3a..2d6cca0e908 100644
--- a/examples/go.c
+++ b/examples/go.c
@@ -92,9 +92,9 @@ static int occupied(float *b, int i)
return 0;
}
-data random_go_moves(moves m, int n)
+dn_data random_go_moves(moves m, int n)
{
- data d = {0};
+ dn_data d = {0};
d.X = make_matrix(n, 19*19*3);
d.y = make_matrix(n, 19*19+2);
int i, j;
@@ -118,8 +118,8 @@ data random_go_moves(moves m, int n)
int flip = rand()%2;
int rotate = rand()%4;
- image in = float_to_image(19, 19, 3, board);
- image out = float_to_image(19, 19, 1, label);
+ dn_image in = float_to_image(19, 19, 3, board);
+ dn_image out = float_to_image(19, 19, 1, label);
if(flip){
flip_image(in);
flip_image(out);
@@ -138,7 +138,7 @@ void train_go(char *cfgfile, char *weightfile, char *filename, int *gpus, int ng
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -150,7 +150,7 @@ void train_go(char *cfgfile, char *weightfile, char *filename, int *gpus, int ng
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
- network *net = nets[0];
+ dn_network *net = nets[0];
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
char *backup_directory = "/home/pjreddie/backup/";
@@ -165,7 +165,7 @@ void train_go(char *cfgfile, char *weightfile, char *filename, int *gpus, int ng
while(get_current_batch(net) < net->max_batches || net->max_batches == 0){
double time=what_time_is_it_now();
- data train = random_go_moves(m, net->batch*net->subdivisions*ngpus);
+ dn_data train = random_go_moves(m, net->batch*net->subdivisions*ngpus);
printf("Loaded: %lf seconds\n", what_time_is_it_now() - time);
time=what_time_is_it_now();
@@ -294,20 +294,20 @@ void flip_board(float *board)
}
}
-float predict_move2(network *net, float *board, float *move, int multi)
+float predict_move2(dn_network *net, float *board, float *move, int multi)
{
float *output = network_predict(net, board);
copy_cpu(19*19+1, output, 1, move, 1);
float result = output[19*19 + 1];
int i;
if(multi){
- image bim = float_to_image(19, 19, 3, board);
+ dn_image bim = float_to_image(19, 19, 3, board);
for(i = 1; i < 8; ++i){
rotate_image_cw(bim, i);
if(i >= 4) flip_image(bim);
float *output = network_predict(net, board);
- image oim = float_to_image(19, 19, 1, output);
+ dn_image oim = float_to_image(19, 19, 1, output);
result += output[19*19 + 1];
if(i >= 4) flip_image(oim);
@@ -390,11 +390,11 @@ void free_mcts(mcts_tree *root)
free(root);
}
-float *network_predict_rotations(network *net, float *next)
+float *network_predict_rotations(dn_network *net, float *next)
{
int n = net->batch;
float *in = calloc(19*19*3*n, sizeof(float));
- image im = float_to_image(19, 19, 3, next);
+ dn_image im = float_to_image(19, 19, 3, next);
int i,j;
int *inds = random_index_order(0, 8);
for(j = 0; j < n; ++j){
@@ -408,7 +408,7 @@ float *network_predict_rotations(network *net, float *next)
float *pred = network_predict(net, in);
for(j = 0; j < n; ++j){
i = inds[j];
- image im = float_to_image(19, 19, 1, pred + j*(19*19 + 2));
+ dn_image im = float_to_image(19, 19, 1, pred + j*(19*19 + 2));
if(i >= 4) flip_image(im);
rotate_image_cw(im, -i);
if(j > 0){
@@ -421,7 +421,7 @@ float *network_predict_rotations(network *net, float *next)
return pred;
}
-mcts_tree *expand(float *next, float *ko, network *net)
+mcts_tree *expand(float *next, float *ko, dn_network *net)
{
mcts_tree *root = calloc(1, sizeof(mcts_tree));
root->board = next;
@@ -458,7 +458,7 @@ float *copy_board(float *board)
return next;
}
-float select_mcts(mcts_tree *root, network *net, float *prev, float cpuct)
+float select_mcts(mcts_tree *root, dn_network *net, float *prev, float cpuct)
{
if(root->done) return -root->result;
int i;
@@ -507,7 +507,7 @@ float select_mcts(mcts_tree *root, network *net, float *prev, float cpuct)
return -val;
}
-mcts_tree *run_mcts(mcts_tree *tree, network *net, float *board, float *ko, int player, int n, float cpuct, float secs)
+mcts_tree *run_mcts(mcts_tree *tree, dn_network *net, float *board, float *ko, int player, int n, float cpuct, float secs)
{
int i;
double t = what_time_is_it_now();
@@ -735,7 +735,7 @@ void valid_go(char *cfgfile, char *weightfile, int multi, char *filename)
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
@@ -797,7 +797,7 @@ int stdin_ready()
return 0;
}
-mcts_tree *ponder(mcts_tree *tree, network *net, float *b, float *ko, int player, float cpuct)
+mcts_tree *ponder(mcts_tree *tree, dn_network *net, float *b, float *ko, int player, float cpuct)
{
double t = what_time_is_it_now();
int count = 0;
@@ -813,7 +813,7 @@ mcts_tree *ponder(mcts_tree *tree, network *net, float *b, float *ko, int player
void engine_go(char *filename, char *weightfile, int mcts_iters, float secs, float temp, float cpuct, int anon, int resign)
{
mcts_tree *root = 0;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
float *board = calloc(19*19*3, sizeof(float));
@@ -1098,7 +1098,7 @@ void engine_go(char *filename, char *weightfile, int mcts_iters, float secs, flo
void test_go(char *cfg, char *weights, int multi)
{
int i;
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
srand(time(0));
float *board = calloc(19*19*3, sizeof(float));
@@ -1214,17 +1214,17 @@ void self_go(char *filename, char *weightfile, char *f2, char *w2, int multi)
{
mcts_tree *tree1 = 0;
mcts_tree *tree2 = 0;
- network *net = load_network(filename, weightfile, 0);
+ dn_network *net = load_network(filename, weightfile, 0);
//set_batch_network(net, 1);
- network *net2;
+ dn_network *net2;
if (f2) {
net2 = parse_network_cfg(f2);
if(w2){
load_weights(net2, w2);
}
} else {
- net2 = calloc(1, sizeof(network));
+ net2 = calloc(1, sizeof(dn_network));
*net2 = *net;
}
srand(time(0));
@@ -1282,7 +1282,7 @@ void self_go(char *filename, char *weightfile, char *f2, char *w2, int multi)
//mcts_iters = 500;
cpuct = 1;
}
- network *use = ((total%2==0) == (player==1)) ? net : net2;
+ dn_network *use = ((total%2==0) == (player==1)) ? net : net2;
mcts_tree *t = ((total%2==0) == (player==1)) ? tree1 : tree2;
t = run_mcts(t, use, board, two, player, mcts_iters, cpuct, 0);
move m = pick_move(t, temp, player);
diff --git a/examples/instance-segmenter.c b/examples/instance-segmenter.c
index 664e71426d5..4639c7f1cd2 100644
--- a/examples/instance-segmenter.c
+++ b/examples/instance-segmenter.c
@@ -2,7 +2,7 @@
#include <sys/time.h>
#include <assert.h>
-void normalize_image2(image p);
+void normalize_image2(dn_image p);
void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear, int display)
{
int i;
@@ -11,7 +11,7 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -24,10 +24,10 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
- image pred = get_network_image(net);
+ dn_network *net = nets[0];
+ dn_image pred = get_network_image(net);
- image embed = pred;
+ dn_image embed = pred;
embed.c = 3;
embed.data += embed.w*embed.h*80;
@@ -38,17 +38,17 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *train_list = option_find_str(options, "train", "data/train.list");
- list *plist = get_paths(train_list);
+ dn_list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.threads = 32;
@@ -70,8 +70,8 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
args.m = N;
args.type = ISEG_DATA;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
@@ -98,12 +98,12 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
loss = train_network(net, train);
#endif
if(display){
- image tr = float_to_image(net->w/div, net->h/div, 80, train.y.vals[net->batch*(net->subdivisions-1)]);
- image im = float_to_image(net->w, net->h, net->c, train.X.vals[net->batch*(net->subdivisions-1)]);
+ dn_image tr = float_to_image(net->w/div, net->h/div, 80, train.y.vals[net->batch*(net->subdivisions-1)]);
+ dn_image im = float_to_image(net->w, net->h, net->c, train.X.vals[net->batch*(net->subdivisions-1)]);
pred.c = 80;
- image mask = mask_to_rgb(tr);
- image prmask = mask_to_rgb(pred);
- image ecopy = copy_image(embed);
+ dn_image mask = mask_to_rgb(tr);
+ dn_image prmask = mask_to_rgb(pred);
+ dn_image ecopy = copy_image(embed);
normalize_image2(ecopy);
show_image(ecopy, "embed", 1);
free_image(ecopy);
@@ -142,7 +142,7 @@ void train_isegmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
void predict_isegmenter(char *datafile, char *cfg, char *weights, char *filename)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -159,14 +159,14 @@ void predict_isegmenter(char *datafile, char *cfg, char *weights, char *filename
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image sized = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image sized = letterbox_image(im, net->w, net->h);
float *X = sized.data;
time=clock();
float *predictions = network_predict(net, X);
- image pred = get_network_image(net);
- image prmask = mask_to_rgb(pred);
+ dn_image pred = get_network_image(net);
+ dn_image prmask = mask_to_rgb(pred);
printf("Predicted: %f\n", predictions[0]);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
show_image(sized, "orig", 1);
diff --git a/examples/lsd.c b/examples/lsd.c
index 4ab944c884b..d55a2dce19d 100644
--- a/examples/lsd.c
+++ b/examples/lsd.c
@@ -396,9 +396,9 @@ void slerp(float *start, float *end, float s, int n, float *out)
scale_array(out, n, 1./mag);
}
-image random_unit_vector_image(int w, int h, int c)
+dn_image random_unit_vector_image(int w, int h, int c)
{
- image im = make_image(w, h, c);
+ dn_image im = make_image(w, h, c);
int i;
for(i = 0; i < im.w*im.h*im.c; ++i){
im.data[i] = rand_normal();
@@ -410,7 +410,7 @@ image random_unit_vector_image(int w, int h, int c)
void inter_dcgan(char *cfgfile, char *weightfile)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -426,10 +426,10 @@ void inter_dcgan(char *cfgfile, char *weightfile)
break;
}
}
- image start = random_unit_vector_image(net->w, net->h, net->c);
- image end = random_unit_vector_image(net->w, net->h, net->c);
- image im = make_image(net->w, net->h, net->c);
- image orig = copy_image(start);
+ dn_image start = random_unit_vector_image(net->w, net->h, net->c);
+ dn_image end = random_unit_vector_image(net->w, net->h, net->c);
+ dn_image im = make_image(net->w, net->h, net->c);
+ dn_image orig = copy_image(start);
int c = 0;
int count = 0;
@@ -454,7 +454,7 @@ void inter_dcgan(char *cfgfile, char *weightfile)
float *X = im.data;
time=clock();
network_predict(net, X);
- image out = get_network_image_layer(net, imlayer);
+ dn_image out = get_network_image_layer(net, imlayer);
//yuv_to_rgb(out);
normalize_image(out);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
@@ -468,7 +468,7 @@ void inter_dcgan(char *cfgfile, char *weightfile)
void test_dcgan(char *cfgfile, char *weightfile)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -480,7 +480,7 @@ void test_dcgan(char *cfgfile, char *weightfile)
imlayer = net->n-1;
while(1){
- image im = make_image(net->w, net->h, net->c);
+ dn_image im = make_image(net->w, net->h, net->c);
int i;
for(i = 0; i < im.w*im.h*im.c; ++i){
im.data[i] = rand_normal();
@@ -491,7 +491,7 @@ void test_dcgan(char *cfgfile, char *weightfile)
float *X = im.data;
time=clock();
network_predict(net, X);
- image out = get_network_image_layer(net, imlayer);
+ dn_image out = get_network_image_layer(net, imlayer);
//yuv_to_rgb(out);
normalize_image(out);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
@@ -502,7 +502,7 @@ void test_dcgan(char *cfgfile, char *weightfile)
}
}
-void set_network_alpha_beta(network *net, float alpha, float beta)
+void set_network_alpha_beta(dn_network *net, float alpha, float beta)
{
int i;
for(i = 0; i < net->n; ++i){
@@ -1293,7 +1293,7 @@ save_weights(net, buff);
void test_lsd(char *cfg, char *weights, char *filename, int gray)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -1320,15 +1320,15 @@ void test_lsd(char *cfg, char *weights, char *filename, int gray)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image resized = resize_min(im, net->w);
- image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image resized = resize_min(im, net->w);
+ dn_image crop = crop_image(resized, (resized.w - net->w)/2, (resized.h - net->h)/2, net->w, net->h);
if(gray) grayscale_image_3c(crop);
float *X = crop.data;
time=clock();
network_predict(net, X);
- image out = get_network_image_layer(net, imlayer);
+ dn_image out = get_network_image_layer(net, imlayer);
//yuv_to_rgb(out);
constrain_image(out);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
diff --git a/examples/nightmare.c b/examples/nightmare.c
index 2978eb61193..3c4e78c9f18 100644
--- a/examples/nightmare.c
+++ b/examples/nightmare.c
@@ -25,7 +25,7 @@ void calculate_loss(float *output, float *delta, int n, float thresh)
}
}
-void optimize_picture(network *net, image orig, int max_layer, float scale, float rate, float thresh, int norm)
+void optimize_picture(dn_network *net, dn_image orig, int max_layer, float scale, float rate, float thresh, int norm)
{
//scale_image(orig, 2);
//translate_image(orig, -1);
@@ -35,15 +35,15 @@ void optimize_picture(network *net, image orig, int max_layer, float scale, floa
int dy = rand()%16 - 8;
int flip = rand()%2;
- image crop = crop_image(orig, dx, dy, orig.w, orig.h);
- image im = resize_image(crop, (int)(orig.w * scale), (int)(orig.h * scale));
+ dn_image crop = crop_image(orig, dx, dy, orig.w, orig.h);
+ dn_image im = resize_image(crop, (int)(orig.w * scale), (int)(orig.h * scale));
if(flip) flip_image(im);
resize_network(net, im.w, im.h);
- layer last = net->layers[net->n-1];
+ dn_layer last = net->layers[net->n-1];
//net->layers[net->n - 1].activation = LINEAR;
- image delta = make_image(im.w, im.h, im.c);
+ dn_image delta = make_image(im.w, im.h, im.c);
#ifdef GPU
net->delta_gpu = cuda_make_array(delta.data, im.w*im.h*im.c);
@@ -73,8 +73,8 @@ void optimize_picture(network *net, image orig, int max_layer, float scale, floa
if(flip) flip_image(delta);
//normalize_array(delta.data, delta.w*delta.h*delta.c);
- image resized = resize_image(delta, orig.w, orig.h);
- image out = crop_image(resized, -dx, -dy, orig.w, orig.h);
+ dn_image resized = resize_image(delta, orig.w, orig.h);
+ dn_image out = crop_image(resized, -dx, -dy, orig.w, orig.h);
/*
image g = grayscale_image(out);
@@ -83,7 +83,7 @@ void optimize_picture(network *net, image orig, int max_layer, float scale, floa
*/
//rate = rate / abs_mean(out.data, out.w*out.h*out.c);
- image gray = make_image(out.w, out.h, out.c);
+ dn_image gray = make_image(out.w, out.h, out.c);
fill_image(gray, .5);
axpy_cpu(orig.w*orig.h*orig.c, -1, orig.data, 1, gray.data, 1);
axpy_cpu(orig.w*orig.h*orig.c, .1, gray.data, 1, out.data, 1);
@@ -111,7 +111,7 @@ void optimize_picture(network *net, image orig, int max_layer, float scale, floa
}
-void smooth(image recon, image update, float lambda, int num)
+void smooth(dn_image recon, dn_image update, float lambda, int num)
{
int i, j, k;
int ii, jj;
@@ -132,11 +132,11 @@ void smooth(image recon, image update, float lambda, int num)
}
}
-void reconstruct_picture(network *net, float *features, image recon, image update, float rate, float momentum, float lambda, int smooth_size, int iters)
+void reconstruct_picture(dn_network *net, float *features, dn_image recon, dn_image update, float rate, float momentum, float lambda, int smooth_size, int iters)
{
int iter = 0;
for (iter = 0; iter < iters; ++iter) {
- image delta = make_image(recon.w, recon.h, recon.c);
+ dn_image delta = make_image(recon.w, recon.h, recon.c);
#ifdef GPU
layer l = get_network_output_layer(net);
@@ -313,26 +313,26 @@ void run_nightmare(int argc, char **argv)
int reconstruct = find_arg(argc, argv, "-reconstruct");
int smooth_size = find_int_arg(argc, argv, "-smooth", 1);
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
char *cfgbase = basecfg(cfg);
char *imbase = basecfg(input);
set_batch_network(net, 1);
- image im = load_image_color(input, 0, 0);
+ dn_image im = load_image_color(input, 0, 0);
if(0){
float scale = 1;
if(im.w > 512 || im.h > 512){
if(im.w > im.h) scale = 512.0/im.w;
else scale = 512.0/im.h;
}
- image resized = resize_image(im, scale*im.w, scale*im.h);
+ dn_image resized = resize_image(im, scale*im.w, scale*im.h);
free_image(im);
im = resized;
}
//im = letterbox_image(im, net->w, net->h);
float *features = 0;
- image update;
+ dn_image update;
if (reconstruct){
net->n = max_layer;
im = letterbox_image(im, net->w, net->h);
@@ -343,7 +343,7 @@ void run_nightmare(int argc, char **argv)
printf("region!\n");
zero_objectness(net->layers[net->n-1]);
}
- image out_im = copy_image(get_network_image(net));
+ dn_image out_im = copy_image(get_network_image(net));
/*
image crop = crop_image(out_im, zz, zz, out_im.w-2*zz, out_im.h-2*zz);
//flip_image(crop);
@@ -385,7 +385,7 @@ void run_nightmare(int argc, char **argv)
}
fprintf(stderr, "done\n");
if(0){
- image g = grayscale_image(im);
+ dn_image g = grayscale_image(im);
free_image(im);
im = g;
}
@@ -400,12 +400,12 @@ void run_nightmare(int argc, char **argv)
//show_image(im, buff, 0);
if(rotate){
- image rot = rotate_image(im, rotate);
+ dn_image rot = rotate_image(im, rotate);
free_image(im);
im = rot;
}
- image crop = crop_image(im, im.w * (1. - zoom)/2., im.h * (1.-zoom)/2., im.w*zoom, im.h*zoom);
- image resized = resize_image(crop, im.w, im.h);
+ dn_image crop = crop_image(im, im.w * (1. - zoom)/2., im.h * (1.-zoom)/2., im.w*zoom, im.h*zoom);
+ dn_image resized = resize_image(crop, im.w, im.h);
free_image(im);
free_image(crop);
im = resized;
diff --git a/examples/regressor.c b/examples/regressor.c
index 20cec0fad9f..a4f4eb55d55 100644
--- a/examples/regressor.c
+++ b/examples/regressor.c
@@ -10,7 +10,7 @@ void train_regressor(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -23,24 +23,24 @@ void train_regressor(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
+ dn_network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *train_list = option_find_str(options, "train", "data/train.list");
int classes = option_find_int(options, "classes", 1);
- list *plist = get_paths(train_list);
+ dn_list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
clock_t time;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.threads = 32;
@@ -60,8 +60,8 @@ void train_regressor(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
args.m = N;
args.type = REGRESSION_DATA;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
@@ -115,7 +115,7 @@ void train_regressor(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
void predict_regressor(char *cfgfile, char *weightfile, char *filename)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -132,8 +132,8 @@ void predict_regressor(char *cfgfile, char *weightfile, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image sized = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image sized = letterbox_image(im, net->w, net->h);
float *X = sized.data;
time=clock();
diff --git a/examples/rnn.c b/examples/rnn.c
index 5d49eaae707..f02bf890045 100644
--- a/examples/rnn.c
+++ b/examples/rnn.c
@@ -9,11 +9,11 @@ typedef struct {
unsigned char **load_files(char *filename, int *n)
{
- list *paths = get_paths(filename);
+ dn_list *paths = get_paths(filename);
*n = paths->size;
unsigned char **contents = calloc(*n, sizeof(char *));
int i;
- node *x = paths->front;
+ dn_node *x = paths->front;
for(i = 0; i < *n; ++i){
contents[i] = read_file((char *)x->val);
x = x->next;
@@ -171,7 +171,7 @@ void train_char_rnn(char *cfgfile, char *weightfile, char *filename, int clear,
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
float avg_loss = -1;
- network *net = load_network(cfgfile, weightfile, clear);
+ dn_network *net = load_network(cfgfile, weightfile, clear);
int inputs = net->inputs;
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g, Inputs: %d %d %d\n", net->learning_rate, net->momentum, net->decay, inputs, net->batch, net->time_steps);
@@ -254,7 +254,7 @@ void test_char_rnn(char *cfgfile, char *weightfile, int num, char *seed, float t
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int i, j;
@@ -308,7 +308,7 @@ void test_tactic_rnn_multi(char *cfgfile, char *weightfile, int num, float temp,
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int i, j;
@@ -353,7 +353,7 @@ void test_tactic_rnn(char *cfgfile, char *weightfile, int num, float temp, int r
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int i, j;
@@ -388,7 +388,7 @@ void valid_tactic_rnn(char *cfgfile, char *weightfile, char *seed)
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int count = 0;
@@ -437,7 +437,7 @@ void valid_char_rnn(char *cfgfile, char *weightfile, char *seed)
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int count = 0;
@@ -475,7 +475,7 @@ void vec_char_rnn(char *cfgfile, char *weightfile, char *seed)
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
int inputs = net->inputs;
int c;
@@ -504,7 +504,7 @@ void vec_char_rnn(char *cfgfile, char *weightfile, char *seed)
network_predict(net, input);
input[(int)c] = 0;
- layer l = net->layers[0];
+ dn_layer l = net->layers[0];
#ifdef GPU
cuda_pull_array(l.output_gpu, l.output, l.outputs);
#endif
diff --git a/examples/segmenter.c b/examples/segmenter.c
index 2e7cea0b730..5e4530a76bd 100644
--- a/examples/segmenter.c
+++ b/examples/segmenter.c
@@ -10,7 +10,7 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
- network **nets = calloc(ngpus, sizeof(network*));
+ dn_network **nets = calloc(ngpus, sizeof(dn_network*));
srand(time(0));
int seed = rand();
@@ -23,8 +23,8 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
- network *net = nets[0];
- image pred = get_network_image(net);
+ dn_network *net = nets[0];
+ dn_image pred = get_network_image(net);
int div = net->w/pred.w;
assert(pred.w * div == net->w);
@@ -33,17 +33,17 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
- list *options = read_data_cfg(datacfg);
+ dn_list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *train_list = option_find_str(options, "train", "data/train.list");
- list *plist = get_paths(train_list);
+ dn_list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.threads = 32;
@@ -64,8 +64,8 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
args.m = N;
args.type = SEGMENTATION_DATA;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
@@ -92,10 +92,10 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
loss = train_network(net, train);
#endif
if(display){
- image tr = float_to_image(net->w/div, net->h/div, 80, train.y.vals[net->batch*(net->subdivisions-1)]);
- image im = float_to_image(net->w, net->h, net->c, train.X.vals[net->batch*(net->subdivisions-1)]);
- image mask = mask_to_rgb(tr);
- image prmask = mask_to_rgb(pred);
+ dn_image tr = float_to_image(net->w/div, net->h/div, 80, train.y.vals[net->batch*(net->subdivisions-1)]);
+ dn_image im = float_to_image(net->w, net->h, net->c, train.X.vals[net->batch*(net->subdivisions-1)]);
+ dn_image mask = mask_to_rgb(tr);
+ dn_image prmask = mask_to_rgb(pred);
show_image(im, "input", 1);
show_image(prmask, "pred", 1);
show_image(mask, "truth", 100);
@@ -130,7 +130,7 @@ void train_segmenter(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
void predict_segmenter(char *datafile, char *cfg, char *weights, char *filename)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -147,14 +147,14 @@ void predict_segmenter(char *datafile, char *cfg, char *weights, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image sized = letterbox_image(im, net->w, net->h);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image sized = letterbox_image(im, net->w, net->h);
float *X = sized.data;
time=clock();
float *predictions = network_predict(net, X);
- image pred = get_network_image(net);
- image prmask = mask_to_rgb(pred);
+ dn_image pred = get_network_image(net);
+ dn_image prmask = mask_to_rgb(pred);
printf("Predicted: %f\n", predictions[0]);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
show_image(sized, "orig", 1);
diff --git a/examples/super.c b/examples/super.c
index d34406b1f2c..5b083c47228 100644
--- a/examples/super.c
+++ b/examples/super.c
@@ -8,18 +8,18 @@ void train_super(char *cfgfile, char *weightfile, int clear)
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network *net = load_network(cfgfile, weightfile, clear);
+ dn_network *net = load_network(cfgfile, weightfile, clear);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
int imgs = net->batch*net->subdivisions;
int i = *net->seen/imgs;
- data train, buffer;
+ dn_data train, buffer;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.scale = 4;
@@ -66,7 +66,7 @@ void train_super(char *cfgfile, char *weightfile, int clear)
void test_super(char *cfgfile, char *weightfile, char *filename)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
@@ -83,14 +83,14 @@ void test_super(char *cfgfile, char *weightfile, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
+ dn_image im = load_image_color(input, 0, 0);
resize_network(net, im.w, im.h);
printf("%d %d\n", im.w, im.h);
float *X = im.data;
time=clock();
network_predict(net, X);
- image out = get_network_image(net);
+ dn_image out = get_network_image(net);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
save_image(out, "out");
show_image(out, "out", 0);
diff --git a/examples/swag.c b/examples/swag.c
index c22d7855c46..e877b71145e 100644
--- a/examples/swag.c
+++ b/examples/swag.c
@@ -9,28 +9,28 @@ void train_swag(char *cfgfile, char *weightfile)
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network net = parse_network_cfg(cfgfile);
+ dn_network* net = parse_network_cfg(cfgfile);
if(weightfile){
- load_weights(&net, weightfile);
+ load_weights(net, weightfile);
}
- printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
- int imgs = net.batch*net.subdivisions;
- int i = *net.seen/imgs;
- data train, buffer;
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
+ int imgs = net->batch*net->subdivisions;
+ int i = *net->seen/imgs;
+ dn_data train, buffer;
- layer l = net.layers[net.n - 1];
+ dn_layer l = net->layers[net->n - 1];
int side = l.side;
int classes = l.classes;
float jitter = l.jitter;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = {0};
- args.w = net.w;
- args.h = net.h;
+ dn_load_args args = {0};
+ args.w = net->w;
+ args.h = net->h;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
@@ -43,7 +43,7 @@ void train_swag(char *cfgfile, char *weightfile)
pthread_t load_thread = load_data_in_thread(args);
clock_t time;
//while(i*imgs < N*120){
- while(get_current_batch(net) < net.max_batches){
+ while(get_current_batch(net) < net->max_batches){
i += 1;
time=clock();
pthread_join(load_thread, 0);
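
swag.c (and voxel.c and writing.c further down) carries a second kind of change on top of the rename: parse_network_cfg is now consumed through a dn_network pointer rather than a by-value network, so member access moves from net.field to net->field and the &net arguments to load_weights, set_batch_network and resize_network disappear. A minimal before/after sketch of that migration; the function name and the cfg/weights paths are placeholders:

#include <stdio.h>
#include "darknet.h"

/* Old, by-value style:
 *     network net = parse_network_cfg("cfg/yolov1.cfg");
 *     load_weights(&net, "yolov1.weights");
 *     int imgs = net.batch * net.subdivisions;
 */

/* New, pointer style, matching the hunks above. Illustrative only. */
static void pointer_migration_demo(void)
{
    dn_network *net = parse_network_cfg("cfg/yolov1.cfg");
    load_weights(net, "yolov1.weights");
    int imgs = net->batch * net->subdivisions;
    printf("%d images per step, %d max batches\n", imgs, net->max_batches);
}
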
diff --git a/examples/tag.c b/examples/tag.c
index 4caf8cba18f..d307e25d5d4 100644
--- a/examples/tag.c
+++ b/examples/tag.c
@@ -7,19 +7,19 @@ void train_tag(char *cfgfile, char *weightfile, int clear)
char *base = basecfg(cfgfile);
char *backup_directory = "/home/pjreddie/backup/";
printf("%s\n", base);
- network *net = load_network(cfgfile, weightfile, clear);
+ dn_network *net = load_network(cfgfile, weightfile, clear);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
int imgs = 1024;
- list *plist = get_paths("/home/pjreddie/tag/train.list");
+ dn_list *plist = get_paths("/home/pjreddie/tag/train.list");
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
clock_t time;
pthread_t load_thread;
- data train;
- data buffer;
+ dn_data train;
+ dn_data buffer;
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
@@ -82,7 +82,7 @@ void train_tag(char *cfgfile, char *weightfile, int clear)
void test_tag(char *cfgfile, char *weightfile, char *filename)
{
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
int i = 0;
@@ -102,8 +102,8 @@ void test_tag(char *cfgfile, char *weightfile, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- image r = resize_min(im, size);
+ dn_image im = load_image_color(input, 0, 0);
+ dn_image r = resize_min(im, size);
resize_network(net, r.w, r.h);
printf("%d %d\n", r.w, r.h);
diff --git a/examples/voxel.c b/examples/voxel.c
index 01ea9bb9898..80a652018e5 100644
--- a/examples/voxel.c
+++ b/examples/voxel.c
@@ -44,23 +44,23 @@ void train_voxel(char *cfgfile, char *weightfile)
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network net = parse_network_cfg(cfgfile);
+ dn_network* net = parse_network_cfg(cfgfile);
if(weightfile){
- load_weights(&net, weightfile);
+ load_weights(net, weightfile);
}
- printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
- int imgs = net.batch*net.subdivisions;
- int i = *net.seen/imgs;
- data train, buffer;
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
+ int imgs = net->batch*net->subdivisions;
+ int i = *(net->seen)/imgs;
+ dn_data train, buffer;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = {0};
- args.w = net.w;
- args.h = net.h;
+ dn_load_args args = {0};
+ args.w = net->w;
+ args.h = net->h;
args.scale = 4;
args.paths = paths;
args.n = imgs;
@@ -71,7 +71,7 @@ void train_voxel(char *cfgfile, char *weightfile)
pthread_t load_thread = load_data_in_thread(args);
clock_t time;
//while(i*imgs < N*120){
- while(get_current_batch(net) < net.max_batches){
+ while(get_current_batch(net) < net->max_batches){
i += 1;
time=clock();
pthread_join(load_thread, 0);
@@ -105,11 +105,11 @@ void train_voxel(char *cfgfile, char *weightfile)
void test_voxel(char *cfgfile, char *weightfile, char *filename)
{
- network net = parse_network_cfg(cfgfile);
+ dn_network* net = parse_network_cfg(cfgfile);
if(weightfile){
- load_weights(&net, weightfile);
+ load_weights(net, weightfile);
}
- set_batch_network(&net, 1);
+ set_batch_network(net, 1);
srand(2222222);
clock_t time;
@@ -125,14 +125,14 @@ void test_voxel(char *cfgfile, char *weightfile, char *filename)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- resize_network(&net, im.w, im.h);
+ dn_image im = load_image_color(input, 0, 0);
+ resize_network(net, im.w, im.h);
printf("%d %d\n", im.w, im.h);
float *X = im.data;
time=clock();
network_predict(net, X);
- image out = get_network_image(net);
+ dn_image out = get_network_image(net);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
save_image(out, "out");
diff --git a/examples/writing.c b/examples/writing.c
index 1b6ff83b583..419f45ab725 100644
--- a/examples/writing.c
+++ b/examples/writing.c
@@ -7,24 +7,24 @@ void train_writing(char *cfgfile, char *weightfile)
float avg_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
- network net = parse_network_cfg(cfgfile);
+ dn_network* net = parse_network_cfg(cfgfile);
if(weightfile){
- load_weights(&net, weightfile);
+ load_weights(net, weightfile);
}
- printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
- int imgs = net.batch*net.subdivisions;
- list *plist = get_paths("figures.list");
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
+ int imgs = net->batch*net->subdivisions;
+ dn_list *plist = get_paths("figures.list");
char **paths = (char **)list_to_array(plist);
clock_t time;
int N = plist->size;
printf("N: %d\n", N);
- image out = get_network_image(net);
+ dn_image out = get_network_image(net);
- data train, buffer;
+ dn_data train, buffer;
- load_args args = {0};
- args.w = net.w;
- args.h = net.h;
+ dn_load_args args = {0};
+ args.w = net->w;
+ args.h = net->h;
args.out_w = out.w;
args.out_h = out.h;
args.paths = paths;
@@ -34,8 +34,8 @@ void train_writing(char *cfgfile, char *weightfile)
args.type = WRITING_DATA;
pthread_t load_thread = load_data_in_thread(args);
- int epoch = (*net.seen)/N;
- while(get_current_batch(net) < net.max_batches || net.max_batches == 0){
+ int epoch = *(net->seen)/N;
+ while(get_current_batch(net) < net->max_batches || net->max_batches == 0){
time=clock();
pthread_join(load_thread, 0);
train = buffer;
@@ -63,15 +63,15 @@ void train_writing(char *cfgfile, char *weightfile)
if(avg_loss == -1) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
- printf("%ld, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+ printf("%ld, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*(net->seen))/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net->seen);
free_data(train);
if(get_current_batch(net)%100 == 0){
char buff[256];
sprintf(buff, "%s/%s_batch_%ld.weights", backup_directory, base, get_current_batch(net));
save_weights(net, buff);
}
- if(*net.seen/N > epoch){
- epoch = *net.seen/N;
+ if(*net->seen/N > epoch){
+ epoch = *net->seen/N;
char buff[256];
sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
save_weights(net, buff);
@@ -81,11 +81,11 @@ void train_writing(char *cfgfile, char *weightfile)
void test_writing(char *cfgfile, char *weightfile, char *filename)
{
- network net = parse_network_cfg(cfgfile);
+ dn_network* net = parse_network_cfg(cfgfile);
if(weightfile){
- load_weights(&net, weightfile);
+ load_weights(net, weightfile);
}
- set_batch_network(&net, 1);
+ set_batch_network(net, 1);
srand(2222222);
clock_t time;
char buff[256];
@@ -101,21 +101,21 @@ void test_writing(char *cfgfile, char *weightfile, char *filename)
strtok(input, "\n");
}
- image im = load_image_color(input, 0, 0);
- resize_network(&net, im.w, im.h);
+ dn_image im = load_image_color(input, 0, 0);
+ resize_network(net, im.w, im.h);
printf("%d %d %d\n", im.h, im.w, im.c);
float *X = im.data;
time=clock();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
- image pred = get_network_image(net);
+ dn_image pred = get_network_image(net);
- image upsampled = resize_image(pred, im.w, im.h);
- image thresh = threshold_image(upsampled, .5);
+ dn_image upsampled = resize_image(pred, im.w, im.h);
+ dn_image thresh = threshold_image(upsampled, .5);
pred = thresh;
- show_image(pred, "prediction");
- show_image(im, "orig");
+ show_image(pred, "prediction", 0);
+ show_image(im, "orig", 0);
#ifdef OPENCV
cvWaitKey(0);
cvDestroyAllWindows();
diff --git a/examples/yolo.c b/examples/yolo.c
index 4ddb69a3e53..a7efbc4efbf 100644
--- a/examples/yolo.c
+++ b/examples/yolo.c
@@ -10,24 +10,24 @@ void train_yolo(char *cfgfile, char *weightfile)
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
- network *net = load_network(cfgfile, weightfile, 0);
+ dn_network *net = load_network(cfgfile, weightfile, 0);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
int imgs = net->batch*net->subdivisions;
int i = *net->seen/imgs;
- data train, buffer;
+ dn_data train, buffer;
- layer l = net->layers[net->n - 1];
+ dn_layer l = net->layers[net->n - 1];
int side = l.side;
int classes = l.classes;
float jitter = l.jitter;
- list *plist = get_paths(train_images);
+ dn_list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.paths = paths;
@@ -97,18 +97,18 @@ void print_yolo_detections(FILE **fps, char *id, int total, int classes, int w,
void validate_yolo(char *cfg, char *weights)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
char *base = "results/comp4_det_test_";
//list *plist = get_paths("data/voc.2007.test");
- list *plist = get_paths("/home/pjreddie/data/voc/2007_test.txt");
+ dn_list *plist = get_paths("/home/pjreddie/data/voc/2007_test.txt");
//list *plist = get_paths("data/voc.2012.test");
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
int j;
@@ -128,13 +128,13 @@ void validate_yolo(char *cfg, char *weights)
float iou_thresh = .5;
int nthreads = 8;
- image *val = calloc(nthreads, sizeof(image));
- image *val_resized = calloc(nthreads, sizeof(image));
- image *buf = calloc(nthreads, sizeof(image));
- image *buf_resized = calloc(nthreads, sizeof(image));
+ dn_image *val = calloc(nthreads, sizeof(dn_image));
+ dn_image *val_resized = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf = calloc(nthreads, sizeof(dn_image));
+ dn_image *buf_resized = calloc(nthreads, sizeof(dn_image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.type = IMAGE_DATA;
@@ -181,16 +181,16 @@ void validate_yolo(char *cfg, char *weights)
void validate_yolo_recall(char *cfg, char *weights)
{
- network *net = load_network(cfg, weights, 0);
+ dn_network *net = load_network(cfg, weights, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
char *base = "results/comp4_det_test_";
- list *plist = get_paths("data/voc.2007.test");
+ dn_list *plist = get_paths("data/voc.2007.test");
char **paths = (char **)list_to_array(plist);
- layer l = net->layers[net->n-1];
+ dn_layer l = net->layers[net->n-1];
int classes = l.classes;
int side = l.side;
@@ -216,8 +216,8 @@ void validate_yolo_recall(char *cfg, char *weights)
for(i = 0; i < m; ++i){
char *path = paths[i];
- image orig = load_image_color(path, 0, 0);
- image sized = resize_image(orig, net->w, net->h);
+ dn_image orig = load_image_color(path, 0, 0);
+ dn_image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
@@ -232,7 +232,7 @@ void validate_yolo_recall(char *cfg, char *weights)
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
- box_label *truth = read_boxes(labelpath, &num_labels);
+ dn_box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < side*side*l.n; ++k){
if(dets[k].objectness > thresh){
++proposals;
@@ -240,7 +240,7 @@ void validate_yolo_recall(char *cfg, char *weights)
}
for (j = 0; j < num_labels; ++j) {
++total;
- box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
+ dn_box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < side*side*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
@@ -264,9 +264,9 @@ void validate_yolo_recall(char *cfg, char *weights)
void test_yolo(char *cfgfile, char *weightfile, char *filename, float thresh)
{
- image **alphabet = load_alphabet();
- network *net = load_network(cfgfile, weightfile, 0);
- layer l = net->layers[net->n-1];
+ dn_image **alphabet = load_alphabet();
+ dn_network *net = load_network(cfgfile, weightfile, 0);
+ dn_layer l = net->layers[net->n-1];
set_batch_network(net, 1);
srand(2222222);
clock_t time;
@@ -283,8 +283,8 @@ void test_yolo(char *cfgfile, char *weightfile, char *filename, float thresh)
if(!input) return;
strtok(input, "\n");
}
- image im = load_image_color(input,0,0);
- image sized = resize_image(im, net->w, net->h);
+ dn_image im = load_image_color(input,0,0);
+ dn_image sized = resize_image(im, net->w, net->h);
float *X = sized.data;
time=clock();
network_predict(net, X);
diff --git a/include/darknet.h b/include/darknet.h
index 4390c619409..b7989415646 100644
--- a/include/darknet.h
+++ b/include/darknet.h
@@ -42,8 +42,8 @@ typedef struct{
int groups;
int *group_size;
int *group_offset;
-} tree;
-tree *read_tree(char *filename);
+} dn_tree;
+dn_tree *read_tree(const char *filename);
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU
@@ -106,22 +106,22 @@ typedef struct{
int t;
} update_args;
-struct network;
-typedef struct network network;
+struct dn_network;
+typedef struct dn_network dn_network;
-struct layer;
-typedef struct layer layer;
+struct dn_layer;
+typedef struct dn_layer dn_layer;
-struct layer{
+struct dn_layer{
LAYER_TYPE type;
ACTIVATION activation;
COST_TYPE cost_type;
- void (*forward) (struct layer, struct network);
- void (*backward) (struct layer, struct network);
- void (*update) (struct layer, update_args);
- void (*forward_gpu) (struct layer, struct network);
- void (*backward_gpu) (struct layer, struct network);
- void (*update_gpu) (struct layer, update_args);
+ void (*forward) (struct dn_layer, struct dn_network);
+ void (*backward) (struct dn_layer, struct dn_network);
+ void (*update) (struct dn_layer, update_args);
+ void (*forward_gpu) (struct dn_layer, struct dn_network);
+ void (*backward_gpu) (struct dn_layer, struct dn_network);
+ void (*update_gpu) (struct dn_layer, update_args);
int batch_normalize;
int shortcut;
int batch;
@@ -288,46 +288,46 @@ struct layer{
float * binary_input;
- struct layer *input_layer;
- struct layer *self_layer;
- struct layer *output_layer;
+ struct dn_layer *input_layer;
+ struct dn_layer *self_layer;
+ struct dn_layer *output_layer;
- struct layer *reset_layer;
- struct layer *update_layer;
- struct layer *state_layer;
+ struct dn_layer *reset_layer;
+ struct dn_layer *update_layer;
+ struct dn_layer *state_layer;
- struct layer *input_gate_layer;
- struct layer *state_gate_layer;
- struct layer *input_save_layer;
- struct layer *state_save_layer;
- struct layer *input_state_layer;
- struct layer *state_state_layer;
+ struct dn_layer *input_gate_layer;
+ struct dn_layer *state_gate_layer;
+ struct dn_layer *input_save_layer;
+ struct dn_layer *state_save_layer;
+ struct dn_layer *input_state_layer;
+ struct dn_layer *state_state_layer;
- struct layer *input_z_layer;
- struct layer *state_z_layer;
+ struct dn_layer *input_z_layer;
+ struct dn_layer *state_z_layer;
- struct layer *input_r_layer;
- struct layer *state_r_layer;
+ struct dn_layer *input_r_layer;
+ struct dn_layer *state_r_layer;
- struct layer *input_h_layer;
- struct layer *state_h_layer;
+ struct dn_layer *input_h_layer;
+ struct dn_layer *state_h_layer;
- struct layer *wz;
- struct layer *uz;
- struct layer *wr;
- struct layer *ur;
- struct layer *wh;
- struct layer *uh;
- struct layer *uo;
- struct layer *wo;
- struct layer *uf;
- struct layer *wf;
- struct layer *ui;
- struct layer *wi;
- struct layer *ug;
- struct layer *wg;
-
- tree *softmax_tree;
+ struct dn_layer *wz;
+ struct dn_layer *uz;
+ struct dn_layer *wr;
+ struct dn_layer *ur;
+ struct dn_layer *wh;
+ struct dn_layer *uh;
+ struct dn_layer *uo;
+ struct dn_layer *wo;
+ struct dn_layer *uf;
+ struct dn_layer *wf;
+ struct dn_layer *ui;
+ struct dn_layer *wi;
+ struct dn_layer *ug;
+ struct dn_layer *wg;
+
+ dn_tree *softmax_tree;
size_t workspace_size;
@@ -421,20 +421,20 @@ struct layer{
#endif
};
-void free_layer(layer);
+void free_layer(dn_layer);
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
-typedef struct network{
+typedef struct dn_network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
- layer *layers;
+ dn_layer *layers;
float *output;
learning_rate_policy policy;
@@ -475,7 +475,7 @@ typedef struct network{
int random;
int gpu_index;
- tree *hierarchy;
+ dn_tree *hierarchy;
float *input;
float *truth;
@@ -493,7 +493,7 @@ typedef struct network{
float *output_gpu;
#endif
-} network;
+} dn_network;
typedef struct {
int w;
@@ -510,14 +510,14 @@ typedef struct {
int h;
int c;
float *data;
-} image;
+} dn_image;
typedef struct{
float x, y, w, h;
-} box;
+} dn_box;
typedef struct detection{
- box bbox;
+ dn_box bbox;
int classes;
float *prob;
float *mask;
@@ -525,26 +525,26 @@ typedef struct detection{
int sort_class;
} detection;
-typedef struct matrix{
+typedef struct {
int rows, cols;
float **vals;
-} matrix;
+} dn_matrix;
typedef struct{
int w, h;
- matrix X;
- matrix y;
+ dn_matrix X;
+ dn_matrix y;
int shallow;
int *num_boxes;
- box **boxes;
-} data;
+ dn_box **boxes;
+} dn_data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA, ISEG_DATA
-} data_type;
+} dn_data_type;
-typedef struct load_args{
+typedef struct dn_load_args{
int threads;
char **paths;
char *path;
@@ -570,48 +570,48 @@ typedef struct load_args{
float saturation;
float exposure;
float hue;
- data *d;
- image *im;
- image *resized;
- data_type type;
- tree *hierarchy;
-} load_args;
+ dn_data *d;
+ dn_image *im;
+ dn_image *resized;
+ dn_data_type type;
+ dn_tree *hierarchy;
+} dn_load_args;
typedef struct{
int id;
float x,y,w,h;
float left, right, top, bottom;
-} box_label;
+} dn_box_label;
-network *load_network(char *cfg, char *weights, int clear);
-load_args get_base_args(network *net);
+dn_network *load_network(const char *cfg, const char *weights, int clear);
+dn_load_args get_base_args(dn_network *net);
-void free_data(data d);
+void free_data(dn_data d);
-typedef struct node{
+typedef struct dn_node{
void *val;
- struct node *next;
- struct node *prev;
-} node;
+ struct dn_node *next;
+ struct dn_node *prev;
+} dn_node;
-typedef struct list{
+typedef struct dn_list{
int size;
- node *front;
- node *back;
-} list;
+ dn_node *front;
+ dn_node *back;
+} dn_list;
-pthread_t load_data(load_args args);
-list *read_data_cfg(char *filename);
-list *read_cfg(char *filename);
-unsigned char *read_file(char *filename);
-data resize_data(data orig, int w, int h);
-data *tile_data(data orig, int divs, int size);
-data select_data(data *orig, int *inds);
+pthread_t load_data(dn_load_args args);
+dn_list *read_data_cfg(const char *filename);
+dn_list *read_cfg(const char *filename);
+unsigned char *read_file(const char *filename);
+dn_data resize_data(dn_data orig, int w, int h);
+dn_data *tile_data(dn_data orig, int divs, int size);
+dn_data select_data(dn_data *orig, int *inds);
-void forward_network(network *net);
-void backward_network(network *net);
-void update_network(network *net);
+void forward_network(dn_network *net);
+void backward_network(dn_network *net);
+void update_network(dn_network *net);
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
@@ -622,7 +622,7 @@ void fill_cpu(int N, float ALPHA, float * X, int INCX);
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
void softmax(float *input, int n, float temp, int stride, float *output);
-int best_3d_shift_r(image a, image b, int min, int max);
+int best_3d_shift_r(dn_image a, dn_image b, int min, int max);
#ifdef GPU
void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
void fill_gpu(int N, float ALPHA, float * X, int INCX);
@@ -644,115 +644,115 @@ float train_networks(network **nets, int n, data d, int interval);
void sync_nets(network **nets, int n, int interval);
void harmless_update_network_gpu(network *net);
#endif
-image get_label(image **characters, char *string, int size);
-void draw_label(image a, int r, int c, image label, const float *rgb);
-void save_image(image im, const char *name);
-void save_image_options(image im, const char *name, IMTYPE f, int quality);
-void get_next_batch(data d, int n, int offset, float *X, float *y);
-void grayscale_image_3c(image im);
-void normalize_image(image p);
-void matrix_to_csv(matrix m);
-float train_network_sgd(network *net, data d, int n);
-void rgbgr_image(image im);
-data copy_data(data d);
-data concat_data(data d1, data d2);
-data load_cifar10_data(char *filename);
-float matrix_topk_accuracy(matrix truth, matrix guess, int k);
-void matrix_add_matrix(matrix from, matrix to);
-void scale_matrix(matrix m, float scale);
-matrix csv_to_matrix(char *filename);
-float *network_accuracies(network *net, data d, int n);
-float train_network_datum(network *net);
-image make_random_image(int w, int h, int c);
-
-void denormalize_connected_layer(layer l);
-void denormalize_convolutional_layer(layer l);
-void statistics_connected_layer(layer l);
-void rescale_weights(layer l, float scale, float trans);
-void rgbgr_weights(layer l);
-image *get_weights(layer l);
+dn_image get_label(dn_image **characters, char *string, int size);
+void draw_label(dn_image a, int r, int c, dn_image label, const float *rgb);
+void save_image(dn_image im, const char *name);
+void save_image_options(dn_image im, const char *name, IMTYPE f, int quality);
+void get_next_batch(dn_data d, int n, int offset, float *X, float *y);
+void grayscale_image_3c(dn_image im);
+void normalize_image(dn_image p);
+void matrix_to_csv(dn_matrix m);
+float train_network_sgd(dn_network *net, dn_data d, int n);
+void rgbgr_image(dn_image im);
+dn_data copy_data(dn_data d);
+dn_data concat_data(dn_data d1, dn_data d2);
+dn_data load_cifar10_data(const char *filename);
+float matrix_topk_accuracy(dn_matrix truth, dn_matrix guess, int k);
+void matrix_add_matrix(dn_matrix from, dn_matrix to);
+void scale_matrix(dn_matrix m, float scale);
+dn_matrix csv_to_matrix(const char *filename);
+float *network_accuracies(dn_network *net, dn_data d, int n);
+float train_network_datum(dn_network *net);
+dn_image make_random_image(int w, int h, int c);
+
+void denormalize_connected_layer(dn_layer l);
+void denormalize_convolutional_layer(dn_layer l);
+void statistics_connected_layer(dn_layer l);
+void rescale_weights(dn_layer l, float scale, float trans);
+void rgbgr_weights(dn_layer l);
+dn_image *get_weights(dn_layer l);
void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, int avg, float hier_thresh, int w, int h, int fps, int fullscreen);
-void get_detection_detections(layer l, int w, int h, float thresh, detection *dets);
-
-char *option_find_str(list *l, char *key, char *def);
-int option_find_int(list *l, char *key, int def);
-int option_find_int_quiet(list *l, char *key, int def);
-
-network *parse_network_cfg(char *filename);
-void save_weights(network *net, char *filename);
-void load_weights(network *net, char *filename);
-void save_weights_upto(network *net, char *filename, int cutoff);
-void load_weights_upto(network *net, char *filename, int start, int cutoff);
-
-void zero_objectness(layer l);
-void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets);
-int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets);
-void free_network(network *net);
-void set_batch_network(network *net, int b);
-void set_temp_network(network *net, float t);
-image load_image(char *filename, int w, int h, int c);
-image load_image_color(char *filename, int w, int h);
-image make_image(int w, int h, int c);
-image resize_image(image im, int w, int h);
-void censor_image(image im, int dx, int dy, int w, int h);
-image letterbox_image(image im, int w, int h);
-image crop_image(image im, int dx, int dy, int w, int h);
-image center_crop_image(image im, int w, int h);
-image resize_min(image im, int min);
-image resize_max(image im, int max);
-image threshold_image(image im, float thresh);
-image mask_to_rgb(image mask);
-int resize_network(network *net, int w, int h);
-void free_matrix(matrix m);
-void test_resize(char *filename);
-int show_image(image p, const char *name, int ms);
-image copy_image(image p);
-void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b);
-float get_current_rate(network *net);
+void get_detection_detections(dn_layer l, int w, int h, float thresh, detection *dets);
+
+char *option_find_str(dn_list *l, char *key, char *def);
+int option_find_int(dn_list *l, char *key, int def);
+int option_find_int_quiet(dn_list *l, char *key, int def);
+
+dn_network *parse_network_cfg(const char *filename);
+void save_weights(dn_network *net, const char *filename);
+void load_weights(dn_network *net, const char *filename);
+void save_weights_upto(dn_network *net, const char *filename, int cutoff);
+void load_weights_upto(dn_network *net, const char *filename, int start, int cutoff);
+
+void zero_objectness(dn_layer l);
+void get_region_detections(dn_layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets);
+int get_yolo_detections(dn_layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets);
+void free_network(dn_network *net);
+void set_batch_network(dn_network *net, int b);
+void set_temp_network(dn_network *net, float t);
+dn_image load_image(const char *filename, int w, int h, int c);
+dn_image load_image_color(const char *filename, int w, int h);
+dn_image make_image(int w, int h, int c);
+dn_image resize_image(dn_image im, int w, int h);
+void censor_image(dn_image im, int dx, int dy, int w, int h);
+dn_image letterbox_image(dn_image im, int w, int h);
+dn_image crop_image(dn_image im, int dx, int dy, int w, int h);
+dn_image center_crop_image(dn_image im, int w, int h);
+dn_image resize_min(dn_image im, int min);
+dn_image resize_max(dn_image im, int max);
+dn_image threshold_image(dn_image im, float thresh);
+dn_image mask_to_rgb(dn_image mask);
+int resize_network(dn_network *net, int w, int h);
+void free_matrix(dn_matrix m);
+void test_resize(const char *filename);
+int show_image(dn_image p, const char *name, int ms);
+dn_image copy_image(dn_image p);
+void draw_box_width(dn_image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b);
+float get_current_rate(dn_network *net);
void composite_3d(char *f1, char *f2, char *out, int delta);
-data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h);
-size_t get_current_batch(network *net);
-void constrain_image(image im);
-image get_network_image_layer(network *net, int i);
-layer get_network_output_layer(network *net);
-void top_predictions(network *net, int n, int *index);
-void flip_image(image a);
-image float_to_image(int w, int h, int c, float *data);
-void ghost_image(image source, image dest, int dx, int dy);
-float network_accuracy(network *net, data d);
-void random_distort_image(image im, float hue, float saturation, float exposure);
-void fill_image(image m, float s);
-image grayscale_image(image im);
-void rotate_image_cw(image im, int times);
+dn_data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h);
+size_t get_current_batch(dn_network *net);
+void constrain_image(dn_image im);
+dn_image get_network_image_layer(dn_network *net, int i);
+dn_layer get_network_output_layer(dn_network *net);
+void top_predictions(dn_network *net, int n, int *index);
+void flip_image(dn_image a);
+dn_image float_to_image(int w, int h, int c, float *data);
+void ghost_image(dn_image source, dn_image dest, int dx, int dy);
+float network_accuracy(dn_network *net, dn_data d);
+void random_distort_image(dn_image im, float hue, float saturation, float exposure);
+void fill_image(dn_image m, float s);
+dn_image grayscale_image(dn_image im);
+void rotate_image_cw(dn_image im, int times);
double what_time_is_it_now();
-image rotate_image(image m, float rad);
-void visualize_network(network *net);
-float box_iou(box a, box b);
-data load_all_cifar10();
-box_label *read_boxes(char *filename, int *n);
-box float_to_box(float *f, int stride);
-void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes);
-
-matrix network_predict_data(network *net, data test);
-image **load_alphabet();
-image get_network_image(network *net);
-float *network_predict(network *net, float *input);
-
-int network_width(network *net);
-int network_height(network *net);
-float *network_predict_image(network *net, image im);
-void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets);
-detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
+dn_image rotate_image(dn_image m, float rad);
+void visualize_network(dn_network *net);
+float box_iou(dn_box a, dn_box b);
+dn_data load_all_cifar10();
+dn_box_label *read_boxes(const char *filename, int *n);
+dn_box float_to_box(float *f, int stride);
+void draw_detections(dn_image im, detection *dets, int num, float thresh, char **names, dn_image **alphabet, int classes);
+
+dn_matrix network_predict_data(dn_network *net, dn_data test);
+dn_image **load_alphabet();
+dn_image get_network_image(dn_network *net);
+float *network_predict(dn_network *net, float *input);
+
+int network_width(dn_network *net);
+int network_height(dn_network *net);
+float *network_predict_image(dn_network *net, dn_image im);
+void network_detect(dn_network *net, dn_image im, float thresh, float hier_thresh, float nms, detection *dets);
+detection *get_network_boxes(dn_network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
void free_detections(detection *dets, int n);
-void reset_network_state(network *net, int b);
+void reset_network_state(dn_network *net, int b);
-char **get_labels(char *filename);
+char **get_labels(const char *filename);
void do_nms_obj(detection *dets, int total, int classes, float thresh);
void do_nms_sort(detection *dets, int total, int classes, float thresh);
-matrix make_matrix(int rows, int cols);
+dn_matrix make_matrix(int rows, int cols);
#ifdef OPENCV
void *open_video_stream(const char *f, int c, int w, int h, int fps);
@@ -760,13 +760,13 @@ image get_image_from_stream(void *p);
void make_window(char *name, int w, int h, int fullscreen);
#endif
-void free_image(image m);
-float train_network(network *net, data d);
-pthread_t load_data_in_thread(load_args args);
-void load_data_blocking(load_args args);
-list *get_paths(char *filename);
-void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride);
-void change_leaves(tree *t, char *leaf_list);
+void free_image(dn_image m);
+float train_network(dn_network *net, dn_data d);
+pthread_t load_data_in_thread(dn_load_args args);
+void load_data_blocking(dn_load_args args);
+dn_list *get_paths(const char *filename);
+void hierarchy_predictions(float *predictions, int n, dn_tree *hier, int only_leaves, int stride);
+void change_leaves(dn_tree *t, char *leaf_list);
int find_int_arg(int argc, char **argv, char *arg, int def);
float find_float_arg(int argc, char **argv, char *arg, float def);
@@ -778,15 +778,15 @@ void free_ptrs(void **ptrs, int n);
char *fgetl(FILE *fp);
void strip(char *s);
float sec(clock_t clocks);
-void **list_to_array(list *l);
+void **list_to_array(dn_list *l);
void top_k(float *a, int n, int k, int *index);
-int *read_map(char *filename);
+int *read_map(const char *filename);
void error(const char *s);
int max_index(float *a, int n);
int max_int_index(int *a, int n);
int sample_array(float *a, int n);
int *random_index_order(int min, int max);
-void free_list(list *l);
+void free_list(dn_list *l);
float mse_array(float *a, int n);
float variance_array(float *a, int n);
float mag_array(float *a, int n);
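
The rename above covers the whole public API in include/darknet.h, so callers only swap the old type names for the dn_-prefixed ones; the call pattern itself is unchanged. A minimal caller-side sketch, not taken from the patch, using only functions and fields declared in this header — the cfg/weights/image paths and the 0.5/0.45 thresholds are illustrative placeholders:

    #include "darknet.h"

    int main(void)
    {
        /* load_network, set_batch_network, etc. use the dn_* typedefs declared above. */
        dn_network *net = load_network("cfg/yolov3.cfg", "yolov3.weights", 0); /* placeholder paths */
        set_batch_network(net, 1);

        dn_image im = load_image_color("data/dog.jpg", 0, 0);     /* placeholder image */
        dn_image sized = letterbox_image(im, net->w, net->h);
        network_predict(net, sized.data);

        dn_layer l = net->layers[net->n - 1];
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, .5, .5, 0, 1, &nboxes);
        do_nms_sort(dets, nboxes, l.classes, .45);

        free_detections(dets, nboxes);
        free_image(sized);
        free_image(im);
        free_network(net);
        return 0;
    }
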
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644
index 00000000000..b9e5653a2c9
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,15 @@
+message("In ${CMAKE_CURRENT_SOURCE_DIR}")
+FILE(GLOB_RECURSE HEADER_FILES "*.h")
+FILE(GLOB_RECURSE SOURCE_FILES "*.c")
+
+set(HEADER_FILES ${HEADER_FILES} include/darknet.h PARENT_SCOPE)
+set(CUDA_FILES)
+if ( GPU )
+ message("Compiling for GPU...")
+    FILE(GLOB_RECURSE CUDA_FILES "*.cu")
+ message("CUDA_FILES = ${CUDA_FILES}")
+endif()
+
+message("SOURCE_FILES = ${SOURCE_FILES}")
+add_library(DarkNet SHARED ${HEADER_FILES} ${SOURCE_FILES} ${CUDA_FILES})
+SET_TARGET_PROPERTIES(DarkNet PROPERTIES LINKER_LANGUAGE C)
diff --git a/src/activation_layer.c b/src/activation_layer.c
index b4ba953967b..b9cb8a23bf1 100644
--- a/src/activation_layer.c
+++ b/src/activation_layer.c
@@ -9,9 +9,9 @@
#include
#include
-layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
+dn_layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
{
- layer l = {0};
+ dn_layer l = {0};
l.type = ACTIVE;
l.inputs = inputs;
@@ -35,13 +35,13 @@ layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
return l;
}
-void forward_activation_layer(layer l, network net)
+void forward_activation_layer(dn_layer l, dn_network net)
{
copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
activate_array(l.output, l.outputs*l.batch, l.activation);
}
-void backward_activation_layer(layer l, network net)
+void backward_activation_layer(dn_layer l, dn_network net)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
copy_cpu(l.outputs*l.batch, l.delta, 1, net.delta, 1);
diff --git a/src/activation_layer.h b/src/activation_layer.h
index 42118a84e83..076424bb2a1 100644
--- a/src/activation_layer.h
+++ b/src/activation_layer.h
@@ -5,10 +5,10 @@
#include "layer.h"
#include "network.h"
-layer make_activation_layer(int batch, int inputs, ACTIVATION activation);
+dn_layer make_activation_layer(int batch, int inputs, ACTIVATION activation);
-void forward_activation_layer(layer l, network net);
-void backward_activation_layer(layer l, network net);
+void forward_activation_layer(dn_layer l, dn_network net);
+void backward_activation_layer(dn_layer l, dn_network net);
#ifdef GPU
void forward_activation_layer_gpu(layer l, network net);
diff --git a/src/avgpool_layer.c b/src/avgpool_layer.c
index 83034dbecf4..4b29d7632e0 100644
--- a/src/avgpool_layer.c
+++ b/src/avgpool_layer.c
@@ -37,7 +37,7 @@ void resize_avgpool_layer(avgpool_layer *l, int w, int h)
l->inputs = h*w*l->c;
}
-void forward_avgpool_layer(const avgpool_layer l, network net)
+void forward_avgpool_layer(const avgpool_layer l, dn_network net)
{
int b,i,k;
@@ -54,7 +54,7 @@ void forward_avgpool_layer(const avgpool_layer l, network net)
}
}
-void backward_avgpool_layer(const avgpool_layer l, network net)
+void backward_avgpool_layer(const avgpool_layer l, dn_network net)
{
int b,i,k;
diff --git a/src/avgpool_layer.h b/src/avgpool_layer.h
index 3bd356c4e39..4d671453ac0 100644
--- a/src/avgpool_layer.h
+++ b/src/avgpool_layer.h
@@ -6,13 +6,13 @@
#include "layer.h"
#include "network.h"
-typedef layer avgpool_layer;
+typedef dn_layer avgpool_layer;
-image get_avgpool_image(avgpool_layer l);
+dn_image get_avgpool_image(avgpool_layer l);
avgpool_layer make_avgpool_layer(int batch, int w, int h, int c);
void resize_avgpool_layer(avgpool_layer *l, int w, int h);
-void forward_avgpool_layer(const avgpool_layer l, network net);
-void backward_avgpool_layer(const avgpool_layer l, network net);
+void forward_avgpool_layer(const avgpool_layer l, dn_network net);
+void backward_avgpool_layer(const avgpool_layer l, dn_network net);
#ifdef GPU
void forward_avgpool_layer_gpu(avgpool_layer l, network net);
diff --git a/src/batchnorm_layer.c b/src/batchnorm_layer.c
index ebff387cc4b..d548a7b90d0 100644
--- a/src/batchnorm_layer.c
+++ b/src/batchnorm_layer.c
@@ -3,10 +3,10 @@
#include "blas.h"
#include
-layer make_batchnorm_layer(int batch, int w, int h, int c)
+dn_layer make_batchnorm_layer(int batch, int w, int h, int c)
{
fprintf(stderr, "Batch Normalization Layer: %d x %d x %d image\n", w,h,c);
- layer l = {0};
+ dn_layer l = {0};
l.type = BATCHNORM;
l.batch = batch;
l.h = l.out_h = h;
@@ -127,12 +127,12 @@ void normalize_delta_cpu(float *x, float *mean, float *variance, float *mean_del
}
}
-void resize_batchnorm_layer(layer *layer, int w, int h)
+void resize_batchnorm_layer(dn_layer *layer, int w, int h)
{
fprintf(stderr, "Not implemented\n");
}
-void forward_batchnorm_layer(layer l, network net)
+void forward_batchnorm_layer(dn_layer l, dn_network net)
{
if(l.type == BATCHNORM) copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
copy_cpu(l.outputs*l.batch, l.output, 1, l.x, 1);
@@ -154,7 +154,7 @@ void forward_batchnorm_layer(layer l, network net)
add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
-void backward_batchnorm_layer(layer l, network net)
+void backward_batchnorm_layer(dn_layer l, dn_network net)
{
if(!net.train){
l.mean = l.rolling_mean;
diff --git a/src/batchnorm_layer.h b/src/batchnorm_layer.h
index 25a18a3c8f2..b6216d85c8f 100644
--- a/src/batchnorm_layer.h
+++ b/src/batchnorm_layer.h
@@ -5,9 +5,9 @@
#include "layer.h"
#include "network.h"
-layer make_batchnorm_layer(int batch, int w, int h, int c);
-void forward_batchnorm_layer(layer l, network net);
-void backward_batchnorm_layer(layer l, network net);
+dn_layer make_batchnorm_layer(int batch, int w, int h, int c);
+void forward_batchnorm_layer(dn_layer l, dn_network net);
+void backward_batchnorm_layer(dn_layer l, dn_network net);
#ifdef GPU
void forward_batchnorm_layer_gpu(layer l, network net);
diff --git a/src/box.c b/src/box.c
index 8a1772c9ae0..131aa580953 100644
--- a/src/box.c
+++ b/src/box.c
@@ -40,10 +40,10 @@ void do_nms_obj(detection *dets, int total, int classes, float thresh)
qsort(dets, total, sizeof(detection), nms_comparator);
for(i = 0; i < total; ++i){
if(dets[i].objectness == 0) continue;
- box a = dets[i].bbox;
+ dn_box a = dets[i].bbox;
for(j = i+1; j < total; ++j){
if(dets[j].objectness == 0) continue;
- box b = dets[j].bbox;
+ dn_box b = dets[j].bbox;
if (box_iou(a, b) > thresh){
dets[j].objectness = 0;
for(k = 0; k < classes; ++k){
@@ -77,9 +77,9 @@ void do_nms_sort(detection *dets, int total, int classes, float thresh)
qsort(dets, total, sizeof(detection), nms_comparator);
for(i = 0; i < total; ++i){
if(dets[i].prob[k] == 0) continue;
- box a = dets[i].bbox;
+ dn_box a = dets[i].bbox;
for(j = i+1; j < total; ++j){
- box b = dets[j].bbox;
+ dn_box b = dets[j].bbox;
if (box_iou(a, b) > thresh){
dets[j].prob[k] = 0;
}
@@ -88,9 +88,9 @@ void do_nms_sort(detection *dets, int total, int classes, float thresh)
}
}
-box float_to_box(float *f, int stride)
+dn_box float_to_box(float *f, int stride)
{
- box b = {0};
+ dn_box b = {0};
b.x = f[0];
b.y = f[1*stride];
b.w = f[2*stride];
@@ -98,7 +98,7 @@ box float_to_box(float *f, int stride)
return b;
}
-dbox derivative(box a, box b)
+dbox derivative(dn_box a, dn_box b)
{
dbox d;
d.dx = 0;
@@ -160,7 +160,7 @@ float overlap(float x1, float w1, float x2, float w2)
return right - left;
}
-float box_intersection(box a, box b)
+float box_intersection(dn_box a, dn_box b)
{
float w = overlap(a.x, a.w, b.x, b.w);
float h = overlap(a.y, a.h, b.y, b.h);
@@ -169,19 +169,19 @@ float box_intersection(box a, box b)
return area;
}
-float box_union(box a, box b)
+float box_union(dn_box a, dn_box b)
{
float i = box_intersection(a, b);
float u = a.w*a.h + b.w*b.h - i;
return u;
}
-float box_iou(box a, box b)
+float box_iou(dn_box a, dn_box b)
{
return box_intersection(a, b)/box_union(a, b);
}
-float box_rmse(box a, box b)
+float box_rmse(dn_box a, dn_box b)
{
return sqrt(pow(a.x-b.x, 2) +
pow(a.y-b.y, 2) +
@@ -189,7 +189,7 @@ float box_rmse(box a, box b)
pow(a.h-b.h, 2));
}
-dbox dintersect(box a, box b)
+dbox dintersect(dn_box a, dn_box b)
{
float w = overlap(a.x, a.w, b.x, b.w);
float h = overlap(a.y, a.h, b.y, b.h);
@@ -204,7 +204,7 @@ dbox dintersect(box a, box b)
return di;
}
-dbox dunion(box a, box b)
+dbox dunion(dn_box a, dn_box b)
{
dbox du;
@@ -220,13 +220,13 @@ dbox dunion(box a, box b)
void test_dunion()
{
- box a = {0, 0, 1, 1};
- box dxa= {0+.0001, 0, 1, 1};
- box dya= {0, 0+.0001, 1, 1};
- box dwa= {0, 0, 1+.0001, 1};
- box dha= {0, 0, 1, 1+.0001};
+ dn_box a = {0, 0, 1, 1};
+ dn_box dxa= {0+.0001, 0, 1, 1};
+ dn_box dya= {0, 0+.0001, 1, 1};
+ dn_box dwa= {0, 0, 1+.0001, 1};
+ dn_box dha= {0, 0, 1, 1+.0001};
- box b = {.5, .5, .2, .2};
+ dn_box b = {.5, .5, .2, .2};
dbox di = dunion(a,b);
printf("Union: %f %f %f %f\n", di.dx, di.dy, di.dw, di.dh);
float inter = box_union(a, b);
@@ -242,13 +242,13 @@ void test_dunion()
}
void test_dintersect()
{
- box a = {0, 0, 1, 1};
- box dxa= {0+.0001, 0, 1, 1};
- box dya= {0, 0+.0001, 1, 1};
- box dwa= {0, 0, 1+.0001, 1};
- box dha= {0, 0, 1, 1+.0001};
+ dn_box a = {0, 0, 1, 1};
+ dn_box dxa= {0+.0001, 0, 1, 1};
+ dn_box dya= {0, 0+.0001, 1, 1};
+ dn_box dwa= {0, 0, 1+.0001, 1};
+ dn_box dha= {0, 0, 1, 1+.0001};
- box b = {.5, .5, .2, .2};
+ dn_box b = {.5, .5, .2, .2};
dbox di = dintersect(a,b);
printf("Inter: %f %f %f %f\n", di.dx, di.dy, di.dw, di.dh);
float inter = box_intersection(a, b);
@@ -267,13 +267,13 @@ void test_box()
{
test_dintersect();
test_dunion();
- box a = {0, 0, 1, 1};
- box dxa= {0+.00001, 0, 1, 1};
- box dya= {0, 0+.00001, 1, 1};
- box dwa= {0, 0, 1+.00001, 1};
- box dha= {0, 0, 1, 1+.00001};
+ dn_box a = {0, 0, 1, 1};
+ dn_box dxa= {0+.00001, 0, 1, 1};
+ dn_box dya= {0, 0+.00001, 1, 1};
+ dn_box dwa= {0, 0, 1+.00001, 1};
+ dn_box dha= {0, 0, 1, 1+.00001};
- box b = {.5, 0, .2, .2};
+ dn_box b = {.5, 0, .2, .2};
float iou = box_iou(a,b);
iou = (1-iou)*(1-iou);
@@ -292,7 +292,7 @@ void test_box()
printf("manual %f %f %f %f\n", xiou, yiou, wiou, hiou);
}
-dbox diou(box a, box b)
+dbox diou(dn_box a, dn_box b)
{
float u = box_union(a,b);
float i = box_intersection(a,b);
@@ -316,7 +316,7 @@ dbox diou(box a, box b)
}
-void do_nms(box *boxes, float **probs, int total, int classes, float thresh)
+void do_nms(dn_box *boxes, float **probs, int total, int classes, float thresh)
{
int i, j, k;
for(i = 0; i < total; ++i){
@@ -336,9 +336,9 @@ void do_nms(box *boxes, float **probs, int total, int classes, float thresh)
}
}
-box encode_box(box b, box anchor)
+dn_box encode_box(dn_box b, dn_box anchor)
{
- box encode;
+ dn_box encode;
encode.x = (b.x - anchor.x) / anchor.w;
encode.y = (b.y - anchor.y) / anchor.h;
encode.w = log2(b.w / anchor.w);
@@ -346,9 +346,9 @@ box encode_box(box b, box anchor)
return encode;
}
-box decode_box(box b, box anchor)
+dn_box decode_box(dn_box b, dn_box anchor)
{
- box decode;
+ dn_box decode;
decode.x = b.x * anchor.w + anchor.x;
decode.y = b.y * anchor.h + anchor.y;
decode.w = pow(2., b.w) * anchor.w;
diff --git a/src/box.h b/src/box.h
index dda3e59100c..5b49065f7d9 100644
--- a/src/box.h
+++ b/src/box.h
@@ -6,9 +6,9 @@ typedef struct{
float dx, dy, dw, dh;
} dbox;
-float box_rmse(box a, box b);
-dbox diou(box a, box b);
-box decode_box(box b, box anchor);
-box encode_box(box b, box anchor);
+float box_rmse(dn_box a, dn_box b);
+dbox diou(dn_box a, dn_box b);
+dn_box decode_box(dn_box b, dn_box anchor);
+dn_box encode_box(dn_box b, dn_box anchor);
#endif
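
The box helpers keep their semantics under the new dn_box name: a box is (x, y, w, h) with (x, y) at its center, and box_iou returns intersection over union. A small sketch, not part of the patch, with made-up coordinates (for these values the intersection is 0.15 * 0.2 = 0.03, the union 0.05, so the printed IoU is 0.6):

    #include <stdio.h>
    #include "darknet.h"

    int main(void)
    {
        dn_box a = {.5,  .5, .2, .2};   /* center (0.5, 0.5), 0.2 x 0.2 box */
        dn_box b = {.55, .5, .2, .2};   /* same box shifted 0.05 to the right */
        printf("IoU = %f\n", box_iou(a, b)); /* box_iou is declared in include/darknet.h */
        return 0;
    }
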
diff --git a/src/compare.c b/src/compare.c.HIDE
similarity index 98%
rename from src/compare.c
rename to src/compare.c.HIDE
index d2d2b3bdc67..95a2e4b7936 100644
--- a/src/compare.c
+++ b/src/compare.c.HIDE
@@ -78,7 +78,7 @@ void train_compare(char *cfgfile, char *weightfile)
free(base);
}
-void validate_compare(char *filename, char *weightfile)
+void validate_compare(const char *filename, char *weightfile)
{
int i = 0;
network net = parse_network_cfg(filename);
@@ -224,7 +224,7 @@ void bbox_fight(network net, sortable_bbox *a, sortable_bbox *b, int classes, in
free(X);
}
-void SortMaster3000(char *filename, char *weightfile)
+void SortMaster3000(const char *filename, char *weightfile)
{
int i = 0;
network net = parse_network_cfg(filename);
@@ -255,7 +255,7 @@ void SortMaster3000(char *filename, char *weightfile)
printf("Sorted in %d compares, %f secs\n", total_compares, sec(clock()-time));
}
-void BattleRoyaleWithCheese(char *filename, char *weightfile)
+void BattleRoyaleWithCheese(const char *filename, char *weightfile)
{
int classes = 20;
int i,j;
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 353f4e5677b..d5b64b2f0b4 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -11,10 +11,10 @@
#include
#include
-layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam)
+dn_layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam)
{
int i;
- layer l = {0};
+ dn_layer l = {0};
l.learning_rate_scale = 1;
l.type = CONNECTED;
@@ -129,7 +129,7 @@ layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activa
return l;
}
-void update_connected_layer(layer l, update_args a)
+void update_connected_layer(dn_layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
@@ -148,7 +148,7 @@ void update_connected_layer(layer l, update_args a)
scal_cpu(l.inputs*l.outputs, momentum, l.weight_updates, 1);
}
-void forward_connected_layer(layer l, network net)
+void forward_connected_layer(dn_layer l, dn_network net)
{
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
int m = l.batch;
@@ -166,7 +166,7 @@ void forward_connected_layer(layer l, network net)
activate_array(l.output, l.outputs*l.batch, l.activation);
}
-void backward_connected_layer(layer l, network net)
+void backward_connected_layer(dn_layer l, dn_network net)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
@@ -196,7 +196,7 @@ void backward_connected_layer(layer l, network net)
}
-void denormalize_connected_layer(layer l)
+void denormalize_connected_layer(dn_layer l)
{
int i, j;
for(i = 0; i < l.outputs; ++i){
@@ -212,7 +212,7 @@ void denormalize_connected_layer(layer l)
}
-void statistics_connected_layer(layer l)
+void statistics_connected_layer(dn_layer l)
{
if(l.batch_normalize){
printf("Scales ");
diff --git a/src/connected_layer.h b/src/connected_layer.h
index 6727a964eaa..df9cb53c892 100644
--- a/src/connected_layer.h
+++ b/src/connected_layer.h
@@ -5,11 +5,11 @@
#include "layer.h"
#include "network.h"
-layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam);
+dn_layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam);
-void forward_connected_layer(layer l, network net);
-void backward_connected_layer(layer l, network net);
-void update_connected_layer(layer l, update_args a);
+void forward_connected_layer(dn_layer l, dn_network net);
+void backward_connected_layer(dn_layer l, dn_network net);
+void update_connected_layer(dn_layer l, update_args a);
#ifdef GPU
void forward_connected_layer_gpu(layer l, network net);
diff --git a/src/convolutional_layer.c b/src/convolutional_layer.c
index 1fb58b0933b..a8b13d568bb 100644
--- a/src/convolutional_layer.c
+++ b/src/convolutional_layer.c
@@ -14,12 +14,12 @@
void swap_binary(convolutional_layer *l)
{
- float *swap = l->weights;
- l->weights = l->binary_weights;
- l->binary_weights = swap;
+ float *swap = l->weights;
+ l->weights = l->binary_weights;
+ l->binary_weights = swap;
#ifdef GPU
- swap = l->weights_gpu;
+ swap = l->weights_gpu;
l->weights_gpu = l->binary_weights_gpu;
l->binary_weights_gpu = swap;
#endif
@@ -27,65 +27,65 @@ void swap_binary(convolutional_layer *l)
void binarize_weights(float *weights, int n, int size, float *binary)
{
- int i, f;
- for(f = 0; f < n; ++f){
- float mean = 0;
- for(i = 0; i < size; ++i){
- mean += fabs(weights[f*size + i]);
- }
- mean = mean / size;
- for(i = 0; i < size; ++i){
- binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
- }
- }
+ int i, f;
+ for(f = 0; f < n; ++f){
+ float mean = 0;
+ for(i = 0; i < size; ++i){
+ mean += fabs(weights[f*size + i]);
+ }
+ mean = mean / size;
+ for(i = 0; i < size; ++i){
+ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
+ }
+ }
}
void binarize_cpu(float *input, int n, float *binary)
{
- int i;
- for(i = 0; i < n; ++i){
- binary[i] = (input[i] > 0) ? 1 : -1;
- }
+ int i;
+ for(i = 0; i < n; ++i){
+ binary[i] = (input[i] > 0) ? 1 : -1;
+ }
}
void binarize_input(float *input, int n, int size, float *binary)
{
- int i, s;
- for(s = 0; s < size; ++s){
- float mean = 0;
- for(i = 0; i < n; ++i){
- mean += fabs(input[i*size + s]);
- }
- mean = mean / n;
- for(i = 0; i < n; ++i){
- binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
- }
- }
+ int i, s;
+ for(s = 0; s < size; ++s){
+ float mean = 0;
+ for(i = 0; i < n; ++i){
+ mean += fabs(input[i*size + s]);
+ }
+ mean = mean / n;
+ for(i = 0; i < n; ++i){
+ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
+ }
+ }
}
int convolutional_out_height(convolutional_layer l)
{
- return (l.h + 2*l.pad - l.size) / l.stride + 1;
+ return (l.h + 2*l.pad - l.size) / l.stride + 1;
}
int convolutional_out_width(convolutional_layer l)
{
- return (l.w + 2*l.pad - l.size) / l.stride + 1;
+ return (l.w + 2*l.pad - l.size) / l.stride + 1;
}
-image get_convolutional_image(convolutional_layer l)
+dn_image get_convolutional_image(convolutional_layer l)
{
- return float_to_image(l.out_w,l.out_h,l.out_c,l.output);
+ return float_to_image(l.out_w,l.out_h,l.out_c,l.output);
}
-image get_convolutional_delta(convolutional_layer l)
+dn_image get_convolutional_delta(convolutional_layer l)
{
- return float_to_image(l.out_w,l.out_h,l.out_c,l.delta);
+ return float_to_image(l.out_w,l.out_h,l.out_c,l.delta);
}
-static size_t get_workspace_size(layer l){
+static size_t get_workspace_size(dn_layer l){
#ifdef CUDNN
- if(gpu_index >= 0){
+ if(gpu_index >= 0){
size_t most = 0;
size_t s = 0;
cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
@@ -115,7 +115,7 @@ static size_t get_workspace_size(layer l){
return most;
}
#endif
- return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
+ return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}
#ifdef GPU
@@ -173,93 +173,96 @@ void cudnn_convolutional_setup(layer *l)
#endif
#endif
-convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
+convolutional_layer
+make_convolutional_layer(int batch, int h, int w, int c, int n, int groups,
+ int size, int stride, int padding, ACTIVATION activation,
+ int batch_normalize, int binary, int xnor, int adam)
{
- int i;
- convolutional_layer l = {0};
- l.type = CONVOLUTIONAL;
-
- l.groups = groups;
- l.h = h;
- l.w = w;
- l.c = c;
- l.n = n;
- l.binary = binary;
- l.xnor = xnor;
- l.batch = batch;
- l.stride = stride;
- l.size = size;
- l.pad = padding;
- l.batch_normalize = batch_normalize;
-
- l.weights = calloc(c/groups*n*size*size, sizeof(float));
- l.weight_updates = calloc(c/groups*n*size*size, sizeof(float));
-
- l.biases = calloc(n, sizeof(float));
- l.bias_updates = calloc(n, sizeof(float));
-
- l.nweights = c/groups*n*size*size;
- l.nbiases = n;
-
- // float scale = 1./sqrt(size*size*c);
- float scale = sqrt(2./(size*size*c/l.groups));
- //printf("convscale %f\n", scale);
- //scale = .02;
- //for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);
- for(i = 0; i < l.nweights; ++i) l.weights[i] = scale*rand_normal();
- int out_w = convolutional_out_width(l);
- int out_h = convolutional_out_height(l);
- l.out_h = out_h;
- l.out_w = out_w;
- l.out_c = n;
- l.outputs = l.out_h * l.out_w * l.out_c;
- l.inputs = l.w * l.h * l.c;
-
- l.output = calloc(l.batch*l.outputs, sizeof(float));
- l.delta = calloc(l.batch*l.outputs, sizeof(float));
-
- l.forward = forward_convolutional_layer;
- l.backward = backward_convolutional_layer;
- l.update = update_convolutional_layer;
- if(binary){
- l.binary_weights = calloc(l.nweights, sizeof(float));
- l.cweights = calloc(l.nweights, sizeof(char));
- l.scales = calloc(n, sizeof(float));
- }
- if(xnor){
- l.binary_weights = calloc(l.nweights, sizeof(float));
- l.binary_input = calloc(l.inputs*l.batch, sizeof(float));
- }
-
- if(batch_normalize){
- l.scales = calloc(n, sizeof(float));
- l.scale_updates = calloc(n, sizeof(float));
- for(i = 0; i < n; ++i){
- l.scales[i] = 1;
- }
-
- l.mean = calloc(n, sizeof(float));
- l.variance = calloc(n, sizeof(float));
-
- l.mean_delta = calloc(n, sizeof(float));
- l.variance_delta = calloc(n, sizeof(float));
-
- l.rolling_mean = calloc(n, sizeof(float));
- l.rolling_variance = calloc(n, sizeof(float));
- l.x = calloc(l.batch*l.outputs, sizeof(float));
- l.x_norm = calloc(l.batch*l.outputs, sizeof(float));
- }
- if(adam){
- l.m = calloc(l.nweights, sizeof(float));
- l.v = calloc(l.nweights, sizeof(float));
- l.bias_m = calloc(n, sizeof(float));
- l.scale_m = calloc(n, sizeof(float));
- l.bias_v = calloc(n, sizeof(float));
- l.scale_v = calloc(n, sizeof(float));
- }
+ int i;
+ convolutional_layer l = {0};
+ l.type = CONVOLUTIONAL;
+
+ l.groups = groups;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.n = n;
+ l.binary = binary;
+ l.xnor = xnor;
+ l.batch = batch;
+ l.stride = stride;
+ l.size = size;
+ l.pad = padding;
+ l.batch_normalize = batch_normalize;
+
+ l.weights = calloc(c/groups*n*size*size, sizeof(float));
+ l.weight_updates = calloc(c/groups*n*size*size, sizeof(float));
+
+ l.biases = calloc(n, sizeof(float));
+ l.bias_updates = calloc(n, sizeof(float));
+
+ l.nweights = c/groups*n*size*size;
+ l.nbiases = n;
+
+ // float scale = 1./sqrt(size*size*c);
+ float scale = sqrt(2./(size*size*c/l.groups));
+ //printf("convscale %f\n", scale);
+ //scale = .02;
+ //for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);
+ for(i = 0; i < l.nweights; ++i) l.weights[i] = scale*rand_normal();
+ int out_w = convolutional_out_width(l);
+ int out_h = convolutional_out_height(l);
+ l.out_h = out_h;
+ l.out_w = out_w;
+ l.out_c = n;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+ l.inputs = l.w * l.h * l.c;
+
+ l.output = calloc(l.batch*l.outputs, sizeof(float));
+ l.delta = calloc(l.batch*l.outputs, sizeof(float));
+
+ l.forward = forward_convolutional_layer;
+ l.backward = backward_convolutional_layer;
+ l.update = update_convolutional_layer;
+ if(binary){
+ l.binary_weights = calloc(l.nweights, sizeof(float));
+ l.cweights = calloc(l.nweights, sizeof(char));
+ l.scales = calloc(n, sizeof(float));
+ }
+ if(xnor){
+ l.binary_weights = calloc(l.nweights, sizeof(float));
+ l.binary_input = calloc(l.inputs*l.batch, sizeof(float));
+ }
+
+ if(batch_normalize){
+ l.scales = calloc(n, sizeof(float));
+ l.scale_updates = calloc(n, sizeof(float));
+ for(i = 0; i < n; ++i){
+ l.scales[i] = 1;
+ }
+
+ l.mean = calloc(n, sizeof(float));
+ l.variance = calloc(n, sizeof(float));
+
+ l.mean_delta = calloc(n, sizeof(float));
+ l.variance_delta = calloc(n, sizeof(float));
+
+ l.rolling_mean = calloc(n, sizeof(float));
+ l.rolling_variance = calloc(n, sizeof(float));
+ l.x = calloc(l.batch*l.outputs, sizeof(float));
+ l.x_norm = calloc(l.batch*l.outputs, sizeof(float));
+ }
+ if(adam){
+ l.m = calloc(l.nweights, sizeof(float));
+ l.v = calloc(l.nweights, sizeof(float));
+ l.bias_m = calloc(n, sizeof(float));
+ l.scale_m = calloc(n, sizeof(float));
+ l.bias_v = calloc(n, sizeof(float));
+ l.scale_v = calloc(n, sizeof(float));
+ }
#ifdef GPU
- l.forward_gpu = forward_convolutional_layer_gpu;
+ l.forward_gpu = forward_convolutional_layer_gpu;
l.backward_gpu = backward_convolutional_layer_gpu;
l.update_gpu = update_convolutional_layer_gpu;
@@ -319,27 +322,27 @@ convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int
#endif
}
#endif
- l.workspace_size = get_workspace_size(l);
- l.activation = activation;
+ l.workspace_size = get_workspace_size(l);
+ l.activation = activation;
- fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
+ fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
- return l;
+ return l;
}
void denormalize_convolutional_layer(convolutional_layer l)
{
- int i, j;
- for(i = 0; i < l.n; ++i){
- float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
- for(j = 0; j < l.c/l.groups*l.size*l.size; ++j){
- l.weights[i*l.c/l.groups*l.size*l.size + j] *= scale;
- }
- l.biases[i] -= l.rolling_mean[i] * scale;
- l.scales[i] = 1;
- l.rolling_mean[i] = 0;
- l.rolling_variance[i] = 1;
- }
+ int i, j;
+ for(i = 0; i < l.n; ++i){
+ float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
+ for(j = 0; j < l.c/l.groups*l.size*l.size; ++j){
+ l.weights[i*l.c/l.groups*l.size*l.size + j] *= scale;
+ }
+ l.biases[i] -= l.rolling_mean[i] * scale;
+ l.scales[i] = 1;
+ l.rolling_mean[i] = 0;
+ l.rolling_variance[i] = 1;
+ }
}
/*
@@ -369,26 +372,26 @@ void test_convolutional_layer()
void resize_convolutional_layer(convolutional_layer *l, int w, int h)
{
- l->w = w;
- l->h = h;
- int out_w = convolutional_out_width(*l);
- int out_h = convolutional_out_height(*l);
+ l->w = w;
+ l->h = h;
+ int out_w = convolutional_out_width(*l);
+ int out_h = convolutional_out_height(*l);
- l->out_w = out_w;
- l->out_h = out_h;
+ l->out_w = out_w;
+ l->out_h = out_h;
- l->outputs = l->out_h * l->out_w * l->out_c;
- l->inputs = l->w * l->h * l->c;
+ l->outputs = l->out_h * l->out_w * l->out_c;
+ l->inputs = l->w * l->h * l->c;
- l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
- l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
- if(l->batch_normalize){
- l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));
- l->x_norm = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
- }
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+ if(l->batch_normalize){
+ l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));
+ l->x_norm = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
+ }
#ifdef GPU
- cuda_free(l->delta_gpu);
+ cuda_free(l->delta_gpu);
cuda_free(l->output_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
@@ -405,218 +408,218 @@ void resize_convolutional_layer(convolutional_layer *l, int w, int h)
cudnn_convolutional_setup(l);
#endif
#endif
- l->workspace_size = get_workspace_size(*l);
+ l->workspace_size = get_workspace_size(*l);
}
void add_bias(float *output, float *biases, int batch, int n, int size)
{
- int i,j,b;
- for(b = 0; b < batch; ++b){
- for(i = 0; i < n; ++i){
- for(j = 0; j < size; ++j){
- output[(b*n + i)*size + j] += biases[i];
- }
- }
- }
+ int i,j,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ for(j = 0; j < size; ++j){
+ output[(b*n + i)*size + j] += biases[i];
+ }
+ }
+ }
}
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
- int i,j,b;
- for(b = 0; b < batch; ++b){
- for(i = 0; i < n; ++i){
- for(j = 0; j < size; ++j){
- output[(b*n + i)*size + j] *= scales[i];
- }
- }
- }
+ int i,j,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ for(j = 0; j < size; ++j){
+ output[(b*n + i)*size + j] *= scales[i];
+ }
+ }
+ }
}
void backward_bias(float *bias_updates, float *delta, int batch, int n, int size)
{
- int i,b;
- for(b = 0; b < batch; ++b){
- for(i = 0; i < n; ++i){
- bias_updates[i] += sum_array(delta+size*(i+b*n), size);
- }
- }
+ int i,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ bias_updates[i] += sum_array(delta+size*(i+b*n), size);
+ }
+ }
}
-void forward_convolutional_layer(convolutional_layer l, network net)
+void forward_convolutional_layer(convolutional_layer l, dn_network net)
{
- int i, j;
-
- fill_cpu(l.outputs*l.batch, 0, l.output, 1);
-
- if(l.xnor){
- binarize_weights(l.weights, l.n, l.c/l.groups*l.size*l.size, l.binary_weights);
- swap_binary(&l);
- binarize_cpu(net.input, l.c*l.h*l.w*l.batch, l.binary_input);
- net.input = l.binary_input;
- }
-
- int m = l.n/l.groups;
- int k = l.size*l.size*l.c/l.groups;
- int n = l.out_w*l.out_h;
- for(i = 0; i < l.batch; ++i){
- for(j = 0; j < l.groups; ++j){
- float *a = l.weights + j*l.nweights/l.groups;
- float *b = net.workspace;
- float *c = l.output + (i*l.groups + j)*n*m;
- float *im = net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
-
- if (l.size == 1) {
- b = im;
- } else {
- im2col_cpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
- }
- gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
- }
- }
-
- if(l.batch_normalize){
- forward_batchnorm_layer(l, net);
- } else {
- add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
- }
-
- activate_array(l.output, l.outputs*l.batch, l.activation);
- if(l.binary || l.xnor) swap_binary(&l);
+ int i, j;
+
+ fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+
+ if(l.xnor){
+ binarize_weights(l.weights, l.n, l.c/l.groups*l.size*l.size, l.binary_weights);
+ swap_binary(&l);
+ binarize_cpu(net.input, l.c*l.h*l.w*l.batch, l.binary_input);
+ net.input = l.binary_input;
+ }
+
+ int m = l.n/l.groups;
+ int k = l.size*l.size*l.c/l.groups;
+ int n = l.out_w*l.out_h;
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.weights + j*l.nweights/l.groups;
+ float *b = net.workspace;
+ float *c = l.output + (i*l.groups + j)*n*m;
+ float *im = net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
+
+ if (l.size == 1) {
+ b = im;
+ } else {
+ im2col_cpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
+ }
+ gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
+ }
+ }
+
+ if(l.batch_normalize){
+ forward_batchnorm_layer(l, net);
+ } else {
+ add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
+ }
+
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+ if(l.binary || l.xnor) swap_binary(&l);
}
-void backward_convolutional_layer(convolutional_layer l, network net)
+void backward_convolutional_layer(convolutional_layer l, dn_network net)
{
- int i, j;
- int m = l.n/l.groups;
- int n = l.size*l.size*l.c/l.groups;
- int k = l.out_w*l.out_h;
-
- gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
-
- if(l.batch_normalize){
- backward_batchnorm_layer(l, net);
- } else {
- backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);
- }
-
- for(i = 0; i < l.batch; ++i){
- for(j = 0; j < l.groups; ++j){
- float *a = l.delta + (i*l.groups + j)*m*k;
- float *b = net.workspace;
- float *c = l.weight_updates + j*l.nweights/l.groups;
-
- float *im = net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
- float *imd = net.delta + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
-
- if(l.size == 1){
- b = im;
- } else {
- im2col_cpu(im, l.c/l.groups, l.h, l.w,
- l.size, l.stride, l.pad, b);
+ int i, j;
+ int m = l.n/l.groups;
+ int n = l.size*l.size*l.c/l.groups;
+ int k = l.out_w*l.out_h;
+
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer(l, net);
+ } else {
+ backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.delta + (i*l.groups + j)*m*k;
+ float *b = net.workspace;
+ float *c = l.weight_updates + j*l.nweights/l.groups;
+
+ float *im = net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
+ float *imd = net.delta + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
+
+ if(l.size == 1){
+ b = im;
+ } else {
+ im2col_cpu(im, l.c/l.groups, l.h, l.w,
+ l.size, l.stride, l.pad, b);
+ }
+
+ gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if (net.delta) {
+ a = l.weights + j*l.nweights/l.groups;
+ b = l.delta + (i*l.groups + j)*m*k;
+ c = net.workspace;
+ if (l.size == 1) {
+ c = imd;
}
- gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
-
- if (net.delta) {
- a = l.weights + j*l.nweights/l.groups;
- b = l.delta + (i*l.groups + j)*m*k;
- c = net.workspace;
- if (l.size == 1) {
- c = imd;
- }
-
- gemm(1,0,n,k,m,1,a,n,b,k,0,c,k);
+ gemm(1,0,n,k,m,1,a,n,b,k,0,c,k);
- if (l.size != 1) {
- col2im_cpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd);
- }
+ if (l.size != 1) {
+ col2im_cpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd);
}
- }
- }
+ }
+ }
+ }
}
void update_convolutional_layer(convolutional_layer l, update_args a)
{
- float learning_rate = a.learning_rate*l.learning_rate_scale;
- float momentum = a.momentum;
- float decay = a.decay;
- int batch = a.batch;
-
- axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
- scal_cpu(l.n, momentum, l.bias_updates, 1);
-
- if(l.scales){
- axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
- scal_cpu(l.n, momentum, l.scale_updates, 1);
- }
-
- axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
- axpy_cpu(l.nweights, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
- scal_cpu(l.nweights, momentum, l.weight_updates, 1);
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+ scal_cpu(l.n, momentum, l.bias_updates, 1);
+
+ if(l.scales){
+ axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+ scal_cpu(l.n, momentum, l.scale_updates, 1);
+ }
+
+ axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
+ axpy_cpu(l.nweights, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+ scal_cpu(l.nweights, momentum, l.weight_updates, 1);
}
-image get_convolutional_weight(convolutional_layer l, int i)
+dn_image get_convolutional_weight(convolutional_layer l, int i)
{
- int h = l.size;
- int w = l.size;
- int c = l.c/l.groups;
- return float_to_image(w,h,c,l.weights+i*h*w*c);
+ int h = l.size;
+ int w = l.size;
+ int c = l.c/l.groups;
+ return float_to_image(w,h,c,l.weights+i*h*w*c);
}
void rgbgr_weights(convolutional_layer l)
{
- int i;
- for(i = 0; i < l.n; ++i){
- image im = get_convolutional_weight(l, i);
- if (im.c == 3) {
- rgbgr_image(im);
- }
- }
+ int i;
+ for(i = 0; i < l.n; ++i){
+ dn_image im = get_convolutional_weight(l, i);
+ if (im.c == 3) {
+ rgbgr_image(im);
+ }
+ }
}
void rescale_weights(convolutional_layer l, float scale, float trans)
{
- int i;
- for(i = 0; i < l.n; ++i){
- image im = get_convolutional_weight(l, i);
- if (im.c == 3) {
- scale_image(im, scale);
- float sum = sum_array(im.data, im.w*im.h*im.c);
- l.biases[i] += sum*trans;
- }
- }
+ int i;
+ for(i = 0; i < l.n; ++i){
+ dn_image im = get_convolutional_weight(l, i);
+ if (im.c == 3) {
+ scale_image(im, scale);
+ float sum = sum_array(im.data, im.w*im.h*im.c);
+ l.biases[i] += sum*trans;
+ }
+ }
}
-image *get_weights(convolutional_layer l)
+dn_image *get_weights(convolutional_layer l)
{
- image *weights = calloc(l.n, sizeof(image));
- int i;
- for(i = 0; i < l.n; ++i){
- weights[i] = copy_image(get_convolutional_weight(l, i));
- normalize_image(weights[i]);
- /*
- char buff[256];
- sprintf(buff, "filter%d", i);
- save_image(weights[i], buff);
- */
- }
- //error("hey");
- return weights;
+ dn_image *weights = calloc(l.n, sizeof(dn_image));
+ int i;
+ for(i = 0; i < l.n; ++i){
+ weights[i] = copy_image(get_convolutional_weight(l, i));
+ normalize_image(weights[i]);
+ /*
+ char buff[256];
+ sprintf(buff, "filter%d", i);
+ save_image(weights[i], buff);
+ */
+ }
+ //error("hey");
+ return weights;
}
-image *visualize_convolutional_layer(convolutional_layer l, char *window, image *prev_weights)
+dn_image *visualize_convolutional_layer(convolutional_layer l, char *window, dn_image *prev_weights)
{
- image *single_weights = get_weights(l);
- show_images(single_weights, l.n, window);
-
- image delta = get_convolutional_image(l);
- image dc = collapse_image_layers(delta, 1);
- char buff[256];
- sprintf(buff, "%s: Output", window);
- //show_image(dc, buff);
- //save_image(dc, buff);
- free_image(dc);
- return single_weights;
+ dn_image *single_weights = get_weights(l);
+ show_images(single_weights, l.n, window);
+
+ dn_image delta = get_convolutional_image(l);
+ dn_image dc = collapse_image_layers(delta, 1);
+ char buff[256];
+ sprintf(buff, "%s: Output", window);
+ //show_image(dc, buff);
+ //save_image(dc, buff);
+ free_image(dc);
+ return single_weights;
}
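
As convolutional_out_height/convolutional_out_width above make explicit, the layer's output spatial size is (in + 2*pad - size) / stride + 1 per dimension; for example, a 416-wide input through a 3x3 convolution with stride 2 and pad 1 gives (416 + 2 - 3) / 2 + 1 = 208. The CPU workspace sized by get_workspace_size is the im2col buffer for a single image: out_h * out_w * size * size * c / groups floats.
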
diff --git a/src/convolutional_layer.h b/src/convolutional_layer.h
index 6c261f5fc23..732af16b1da 100644
--- a/src/convolutional_layer.h
+++ b/src/convolutional_layer.h
@@ -7,7 +7,7 @@
#include "layer.h"
#include "network.h"
-typedef layer convolutional_layer;
+typedef dn_layer convolutional_layer;
#ifdef GPU
void forward_convolutional_layer_gpu(convolutional_layer layer, network net);
@@ -27,21 +27,21 @@ void cudnn_convolutional_setup(layer *l);
convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam);
void resize_convolutional_layer(convolutional_layer *layer, int w, int h);
-void forward_convolutional_layer(const convolutional_layer layer, network net);
+void forward_convolutional_layer(const convolutional_layer layer, dn_network net);
void update_convolutional_layer(convolutional_layer layer, update_args a);
-image *visualize_convolutional_layer(convolutional_layer layer, char *window, image *prev_weights);
+dn_image *visualize_convolutional_layer(convolutional_layer layer, char *window, dn_image *prev_weights);
void binarize_weights(float *weights, int n, int size, float *binary);
void swap_binary(convolutional_layer *l);
void binarize_weights2(float *weights, int n, int size, char *binary, float *scales);
-void backward_convolutional_layer(convolutional_layer layer, network net);
+void backward_convolutional_layer(convolutional_layer layer, dn_network net);
void add_bias(float *output, float *biases, int batch, int n, int size);
void backward_bias(float *bias_updates, float *delta, int batch, int n, int size);
-image get_convolutional_image(convolutional_layer layer);
-image get_convolutional_delta(convolutional_layer layer);
-image get_convolutional_weight(convolutional_layer layer, int i);
+dn_image get_convolutional_image(convolutional_layer layer);
+dn_image get_convolutional_delta(convolutional_layer layer);
+dn_image get_convolutional_weight(convolutional_layer layer, int i);
int convolutional_out_height(convolutional_layer layer);
int convolutional_out_width(convolutional_layer layer);
diff --git a/src/cost_layer.c b/src/cost_layer.c
index 2138ff2617a..9b37f9f10db 100644
--- a/src/cost_layer.c
+++ b/src/cost_layer.c
@@ -79,7 +79,7 @@ void resize_cost_layer(cost_layer *l, int inputs)
#endif
}
-void forward_cost_layer(cost_layer l, network net)
+void forward_cost_layer(cost_layer l, dn_network net)
{
if (!net.truth) return;
if(l.cost_type == MASKED){
@@ -98,7 +98,7 @@ void forward_cost_layer(cost_layer l, network net)
l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}
-void backward_cost_layer(const cost_layer l, network net)
+void backward_cost_layer(const cost_layer l, dn_network net)
{
axpy_cpu(l.batch*l.inputs, l.scale, l.delta, 1, net.delta, 1);
}
diff --git a/src/cost_layer.h b/src/cost_layer.h
index ceb64de00bf..5396b0ce2d7 100644
--- a/src/cost_layer.h
+++ b/src/cost_layer.h
@@ -3,13 +3,13 @@
#include "layer.h"
#include "network.h"
-typedef layer cost_layer;
+typedef dn_layer cost_layer;
COST_TYPE get_cost_type(char *s);
char *get_cost_string(COST_TYPE a);
cost_layer make_cost_layer(int batch, int inputs, COST_TYPE type, float scale);
-void forward_cost_layer(const cost_layer l, network net);
-void backward_cost_layer(const cost_layer l, network net);
+void forward_cost_layer(const cost_layer l, dn_network net);
+void backward_cost_layer(const cost_layer l, dn_network net);
void resize_cost_layer(cost_layer *l, int inputs);
#ifdef GPU
diff --git a/src/crnn_layer.c b/src/crnn_layer.c
index 7dd29f62b7a..680ca5c93c3 100644
--- a/src/crnn_layer.c
+++ b/src/crnn_layer.c
@@ -10,7 +10,7 @@
#include
#include
-static void increment_layer(layer *l, int steps)
+static void increment_layer(dn_layer *l, int steps)
{
int num = l->outputs*l->batch*steps;
l->output += num;
@@ -26,11 +26,11 @@ static void increment_layer(layer *l, int steps)
#endif
}
-layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize)
+dn_layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize)
{
fprintf(stderr, "CRNN Layer: %d x %d x %d image, %d filters\n", h,w,c,output_filters);
batch = batch / steps;
- layer l = {0};
+ dn_layer l = {0};
l.batch = batch;
l.type = CRNN;
l.steps = steps;
@@ -46,17 +46,17 @@ layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int ou
l.state = calloc(l.hidden*batch*(steps+1), sizeof(float));
- l.input_layer = malloc(sizeof(layer));
+ l.input_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.input_layer) = make_convolutional_layer(batch*steps, h, w, c, hidden_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
l.input_layer->batch = batch;
- l.self_layer = malloc(sizeof(layer));
+ l.self_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.self_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, hidden_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
l.self_layer->batch = batch;
- l.output_layer = malloc(sizeof(layer));
+ l.output_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.output_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, output_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
l.output_layer->batch = batch;
@@ -81,21 +81,21 @@ layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int ou
return l;
}
-void update_crnn_layer(layer l, update_args a)
+void update_crnn_layer(dn_layer l, update_args a)
{
update_convolutional_layer(*(l.input_layer), a);
update_convolutional_layer(*(l.self_layer), a);
update_convolutional_layer(*(l.output_layer), a);
}
-void forward_crnn_layer(layer l, network net)
+void forward_crnn_layer(dn_layer l, dn_network net)
{
- network s = net;
+ dn_network s = net;
s.train = net.train;
int i;
- layer input_layer = *(l.input_layer);
- layer self_layer = *(l.self_layer);
- layer output_layer = *(l.output_layer);
+ dn_layer input_layer = *(l.input_layer);
+ dn_layer self_layer = *(l.self_layer);
+ dn_layer output_layer = *(l.output_layer);
fill_cpu(l.outputs * l.batch * l.steps, 0, output_layer.delta, 1);
fill_cpu(l.hidden * l.batch * l.steps, 0, self_layer.delta, 1);
@@ -129,13 +129,13 @@ void forward_crnn_layer(layer l, network net)
}
}
-void backward_crnn_layer(layer l, network net)
+void backward_crnn_layer(dn_layer l, dn_network net)
{
- network s = net;
+ dn_network s = net;
int i;
- layer input_layer = *(l.input_layer);
- layer self_layer = *(l.self_layer);
- layer output_layer = *(l.output_layer);
+ dn_layer input_layer = *(l.input_layer);
+ dn_layer self_layer = *(l.self_layer);
+ dn_layer output_layer = *(l.output_layer);
increment_layer(&input_layer, l.steps-1);
increment_layer(&self_layer, l.steps-1);
diff --git a/src/crnn_layer.h b/src/crnn_layer.h
index 515f378354e..c1198298c55 100644
--- a/src/crnn_layer.h
+++ b/src/crnn_layer.h
@@ -6,11 +6,11 @@
#include "layer.h"
#include "network.h"
-layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize);
+dn_layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize);
-void forward_crnn_layer(layer l, network net);
-void backward_crnn_layer(layer l, network net);
-void update_crnn_layer(layer l, update_args a);
+void forward_crnn_layer(dn_layer l, dn_network net);
+void backward_crnn_layer(dn_layer l, dn_network net);
+void update_crnn_layer(dn_layer l, update_args a);
#ifdef GPU
void forward_crnn_layer_gpu(layer l, network net);
diff --git a/src/crop_layer.c b/src/crop_layer.c
index 3b918529e64..fd446157f1f 100644
--- a/src/crop_layer.c
+++ b/src/crop_layer.c
@@ -2,7 +2,7 @@
#include "cuda.h"
#include <stdio.h>
-image get_crop_image(crop_layer l)
+dn_image get_crop_image(crop_layer l)
{
int h = l.out_h;
int w = l.out_w;
@@ -10,8 +10,8 @@ image get_crop_image(crop_layer l)
return float_to_image(w,h,c,l.output);
}
-void backward_crop_layer(const crop_layer l, network net){}
-void backward_crop_layer_gpu(const crop_layer l, network net){}
+void backward_crop_layer(const crop_layer l, dn_network net){}
+void backward_crop_layer_gpu(const crop_layer l, dn_network net){}
crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure)
{
@@ -45,7 +45,7 @@ crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int
return l;
}
-void resize_crop_layer(layer *l, int w, int h)
+void resize_crop_layer(dn_layer *l, int w, int h)
{
l->w = w;
l->h = h;
@@ -64,7 +64,7 @@ void resize_crop_layer(layer *l, int w, int h)
}
-void forward_crop_layer(const crop_layer l, network net)
+void forward_crop_layer(const crop_layer l, dn_network net)
{
int i,j,c,b,row,col;
int index;
diff --git a/src/crop_layer.h b/src/crop_layer.h
index 3b5883c47d6..501b4b2769d 100644
--- a/src/crop_layer.h
+++ b/src/crop_layer.h
@@ -5,12 +5,12 @@
#include "layer.h"
#include "network.h"
-typedef layer crop_layer;
+typedef dn_layer crop_layer;
-image get_crop_image(crop_layer l);
+dn_image get_crop_image(crop_layer l);
crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure);
-void forward_crop_layer(const crop_layer l, network net);
-void resize_crop_layer(layer *l, int w, int h);
+void forward_crop_layer(const crop_layer l, dn_network net);
+void resize_crop_layer(dn_layer *l, int w, int h);
#ifdef GPU
void forward_crop_layer_gpu(crop_layer l, network net);
diff --git a/src/activation_kernels.cu b/src/cuda/activation_kernels.cu
similarity index 100%
rename from src/activation_kernels.cu
rename to src/cuda/activation_kernels.cu
diff --git a/src/avgpool_layer_kernels.cu b/src/cuda/avgpool_layer_kernels.cu
similarity index 94%
rename from src/avgpool_layer_kernels.cu
rename to src/cuda/avgpool_layer_kernels.cu
index a7eca3aeae9..5795681d82b 100644
--- a/src/avgpool_layer_kernels.cu
+++ b/src/cuda/avgpool_layer_kernels.cu
@@ -43,7 +43,7 @@ __global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float
}
}
-extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net)
+extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, dn_network net)
{
size_t n = layer.c*layer.batch;
@@ -51,7 +51,7 @@ extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net)
check_error(cudaPeekAtLastError());
}
-extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, network net)
+extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, dn_network net)
{
size_t n = layer.c*layer.batch;
diff --git a/src/blas_kernels.cu b/src/cuda/blas_kernels.cu
similarity index 99%
rename from src/blas_kernels.cu
rename to src/cuda/blas_kernels.cu
index 47e82179170..d6c85996a71 100644
--- a/src/blas_kernels.cu
+++ b/src/cuda/blas_kernels.cu
@@ -969,7 +969,7 @@ __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int st
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
-extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
+extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, dn_tree hier)
{
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
diff --git a/src/col2im_kernels.cu b/src/cuda/col2im_kernels.cu
similarity index 100%
rename from src/col2im_kernels.cu
rename to src/cuda/col2im_kernels.cu
diff --git a/src/convolutional_kernels.cu b/src/cuda/convolutional_kernels.cu
similarity index 96%
rename from src/convolutional_kernels.cu
rename to src/cuda/convolutional_kernels.cu
index 4a1047b8743..023fd41ab2c 100644
--- a/src/convolutional_kernels.cu
+++ b/src/cuda/convolutional_kernels.cu
@@ -70,7 +70,7 @@ void binarize_weights_gpu(float *weights, int n, int size, float *binary)
check_error(cudaPeekAtLastError());
}
-void forward_convolutional_layer_gpu(convolutional_layer l, network net)
+void forward_convolutional_layer_gpu(convolutional_layer l, dn_network net)
{
fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
@@ -164,7 +164,7 @@ __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, fl
}
}
-extern "C" void smooth_layer(layer l, int size, float rate)
+extern "C" void smooth_layer(dn_layer l, int size, float rate)
{
int h = l.out_h;
int w = l.out_w;
@@ -176,7 +176,7 @@ extern "C" void smooth_layer(layer l, int size, float rate)
check_error(cudaPeekAtLastError());
}
-void backward_convolutional_layer_gpu(convolutional_layer l, network net)
+void backward_convolutional_layer_gpu(convolutional_layer l, dn_network net)
{
if(l.smooth){
smooth_layer(l, 5, l.smooth);
@@ -270,7 +270,7 @@ void backward_convolutional_layer_gpu(convolutional_layer l, network net)
#endif
}
-void pull_convolutional_layer(layer l)
+void pull_convolutional_layer(dn_layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
cuda_pull_array(l.biases_gpu, l.biases, l.n);
@@ -283,7 +283,7 @@ void pull_convolutional_layer(layer l)
}
}
-void push_convolutional_layer(layer l)
+void push_convolutional_layer(dn_layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
cuda_push_array(l.biases_gpu, l.biases, l.n);
@@ -296,7 +296,7 @@ void push_convolutional_layer(layer l)
}
}
-void update_convolutional_layer_gpu(layer l, update_args a)
+void update_convolutional_layer_gpu(dn_layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
diff --git a/src/crop_layer_kernels.cu b/src/cuda/crop_layer_kernels.cu
similarity index 98%
rename from src/crop_layer_kernels.cu
rename to src/cuda/crop_layer_kernels.cu
index b5b9f554627..345492c74be 100644
--- a/src/crop_layer_kernels.cu
+++ b/src/cuda/crop_layer_kernels.cu
@@ -180,7 +180,7 @@ __global__ void forward_crop_layer_kernel(float *input, float *rand, int size, i
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
-extern "C" void forward_crop_layer_gpu(crop_layer layer, network net)
+extern "C" void forward_crop_layer_gpu(crop_layer layer, dn_network net)
{
cuda_random(layer.rand_gpu, layer.batch*8);
diff --git a/src/deconvolutional_kernels.cu b/src/cuda/deconvolutional_kernels.cu
similarity index 92%
rename from src/deconvolutional_kernels.cu
rename to src/cuda/deconvolutional_kernels.cu
index 8267dcfa25b..8d0d3f30be6 100644
--- a/src/deconvolutional_kernels.cu
+++ b/src/cuda/deconvolutional_kernels.cu
@@ -14,7 +14,7 @@ extern "C" {
#include "cuda.h"
}
-extern "C" void forward_deconvolutional_layer_gpu(layer l, network net)
+extern "C" void forward_deconvolutional_layer_gpu(dn_layer l, dn_network net)
{
int i;
@@ -41,7 +41,7 @@ extern "C" void forward_deconvolutional_layer_gpu(layer l, network net)
activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation);
}
-extern "C" void backward_deconvolutional_layer_gpu(layer l, network net)
+extern "C" void backward_deconvolutional_layer_gpu(dn_layer l, dn_network net)
{
int i;
@@ -83,7 +83,7 @@ extern "C" void backward_deconvolutional_layer_gpu(layer l, network net)
}
}
-extern "C" void pull_deconvolutional_layer(layer l)
+extern "C" void pull_deconvolutional_layer(dn_layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_pull_array(l.biases_gpu, l.biases, l.n);
@@ -96,7 +96,7 @@ extern "C" void pull_deconvolutional_layer(layer l)
}
}
-extern "C" void push_deconvolutional_layer(layer l)
+extern "C" void push_deconvolutional_layer(dn_layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
cuda_push_array(l.biases_gpu, l.biases, l.n);
@@ -109,7 +109,7 @@ extern "C" void push_deconvolutional_layer(layer l)
}
}
-void update_deconvolutional_layer_gpu(layer l, update_args a)
+void update_deconvolutional_layer_gpu(dn_layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
diff --git a/src/dropout_layer_kernels.cu b/src/cuda/dropout_layer_kernels.cu
similarity index 88%
rename from src/dropout_layer_kernels.cu
rename to src/cuda/dropout_layer_kernels.cu
index bd12b678758..05eb03a6758 100644
--- a/src/dropout_layer_kernels.cu
+++ b/src/cuda/dropout_layer_kernels.cu
@@ -14,7 +14,7 @@ __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand
if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
-void forward_dropout_layer_gpu(dropout_layer layer, network net)
+void forward_dropout_layer_gpu(dropout_layer layer, dn_network net)
{
if (!net.train) return;
int size = layer.inputs*layer.batch;
@@ -31,7 +31,7 @@ void forward_dropout_layer_gpu(dropout_layer layer, network net)
check_error(cudaPeekAtLastError());
}
-void backward_dropout_layer_gpu(dropout_layer layer, network net)
+void backward_dropout_layer_gpu(dropout_layer layer, dn_network net)
{
if(!net.delta_gpu) return;
int size = layer.inputs*layer.batch;
diff --git a/src/im2col_kernels.cu b/src/cuda/im2col_kernels.cu
similarity index 100%
rename from src/im2col_kernels.cu
rename to src/cuda/im2col_kernels.cu
diff --git a/src/maxpool_layer_kernels.cu b/src/cuda/maxpool_layer_kernels.cu
similarity index 97%
rename from src/maxpool_layer_kernels.cu
rename to src/cuda/maxpool_layer_kernels.cu
index 869ef466af5..28652c63949 100644
--- a/src/maxpool_layer_kernels.cu
+++ b/src/cuda/maxpool_layer_kernels.cu
@@ -84,7 +84,7 @@ __global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_
prev_delta[index] += d;
}
-extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
+extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, dn_network net)
{
int h = layer.out_h;
int w = layer.out_w;
@@ -96,7 +96,7 @@ extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
check_error(cudaPeekAtLastError());
}
-extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
+extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, dn_network net)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
diff --git a/src/data.c b/src/data.c
index 59051b4e9fd..3097f049e10 100644
--- a/src/data.c
+++ b/src/data.c
@@ -9,12 +9,12 @@
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-list *get_paths(char *filename)
+dn_list *get_paths(const char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
- list *lines = make_list();
+ dn_list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
@@ -65,18 +65,18 @@ char **find_replace_paths(char **paths, int n, char *find, char *replace)
return replace_paths;
}
-matrix load_image_paths_gray(char **paths, int n, int w, int h)
+dn_matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
- matrix X;
+ dn_matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
- image im = load_image(paths[i], w, h, 3);
+ dn_image im = load_image(paths[i], w, h, 3);
- image gray = grayscale_image(im);
+ dn_image gray = grayscale_image(im);
free_image(im);
im = gray;
@@ -86,33 +86,33 @@ matrix load_image_paths_gray(char **paths, int n, int w, int h)
return X;
}
-matrix load_image_paths(char **paths, int n, int w, int h)
+dn_matrix load_image_paths(char **paths, int n, int w, int h)
{
int i;
- matrix X;
+ dn_matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
- image im = load_image_color(paths[i], w, h);
+ dn_image im = load_image_color(paths[i], w, h);
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
-matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
+dn_matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
- matrix X;
+ dn_matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
- image im = load_image_color(paths[i], 0, 0);
- image crop;
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
@@ -136,7 +136,7 @@ matrix load_image_augment_paths(char **paths, int n, int min, int max, int size,
}
-box_label *read_boxes(char *filename, int *n)
+dn_box_label *read_boxes(const char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
@@ -144,11 +144,11 @@ box_label *read_boxes(char *filename, int *n)
int id;
int count = 0;
int size = 64;
- box_label *boxes = calloc(size, sizeof(box_label));
+ dn_box_label *boxes = calloc(size, sizeof(dn_box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
- boxes = realloc(boxes, size*sizeof(box_label));
+ boxes = realloc(boxes, size*sizeof(dn_box_label));
}
boxes[count].id = id;
boxes[count].x = x;
@@ -166,18 +166,18 @@ box_label *read_boxes(char *filename, int *n)
return boxes;
}
-void randomize_boxes(box_label *b, int n)
+void randomize_boxes(dn_box_label *b, int n)
{
int i;
for(i = 0; i < n; ++i){
- box_label swap = b[i];
+ dn_box_label swap = b[i];
int index = rand()%n;
b[i] = b[index];
b[index] = swap;
}
}
-void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
+void correct_boxes(dn_box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
@@ -224,7 +224,7 @@ void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx,
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
- box_label *boxes = read_boxes(labelpath, &count);
+ dn_box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
@@ -263,7 +263,7 @@ void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
- box_label *boxes = read_boxes(labelpath, &count);
+ dn_box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
@@ -300,7 +300,7 @@ void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int
free(boxes);
}
-void load_rle(image im, int *rle, int n)
+void load_rle(dn_image im, int *rle, int n)
{
int count = 0;
int curr = 0;
@@ -316,7 +316,7 @@ void load_rle(image im, int *rle, int n)
}
}
-void or_image(image src, image dest, int c)
+void or_image(dn_image src, dn_image dest, int c)
{
int i;
for(i = 0; i < src.w*src.h; ++i){
@@ -324,7 +324,7 @@ void or_image(image src, image dest, int c)
}
}
-void exclusive_image(image src)
+void exclusive_image(dn_image src)
{
int k, j, i;
int s = src.w*src.h;
@@ -339,7 +339,7 @@ void exclusive_image(image src)
}
}
-box bound_image(image im)
+dn_box bound_image(dn_image im)
{
int x,y;
int minx = im.w;
@@ -356,7 +356,7 @@ box bound_image(image im)
}
}
}
- box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
+ dn_box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
return b;
}
@@ -375,15 +375,15 @@ void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w
int id;
int i = 0;
int j;
- image part = make_image(w, h, 1);
+ dn_image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
- image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
+ dn_image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
- image mask = resize_image(sized, mw, mh);
+ dn_image mask = resize_image(sized, mw, mh);
truth[i*(mw*mh+1)] = id;
for(j = 0; j < mw*mh; ++j){
truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
@@ -412,17 +412,17 @@ void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w
char buff[32788];
int id;
int i = 0;
- image part = make_image(w, h, 1);
+ dn_image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
- image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
+ dn_image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
- box b = bound_image(sized);
+ dn_box b = bound_image(sized);
if(b.w > 0){
- image crop = crop_image(sized, b.x, b.y, b.w, b.h);
- image mask = resize_image(crop, mw, mh);
+ dn_image crop = crop_image(sized, b.x, b.y, b.w, b.h);
+ dn_image mask = resize_image(crop, mw, mh);
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
@@ -456,7 +456,7 @@ void fill_truth_detection(char *path, int num_boxes, float *truth, int classes,
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
- box_label *boxes = read_boxes(labelpath, &count);
+ dn_box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if(count > num_boxes) count = num_boxes;
@@ -513,10 +513,10 @@ void fill_truth_captcha(char *path, int n, float *truth)
}
}
-data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
+dn_data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
@@ -528,10 +528,10 @@ data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
return d;
}
-data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
+dn_data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.X.cols = 17100;
@@ -555,7 +555,7 @@ void fill_truth(char *path, char **labels, int k, float *truth)
if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}
-void fill_hierarchy(float *truth, int k, tree *hierarchy)
+void fill_hierarchy(float *truth, int k, dn_tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
@@ -587,9 +587,9 @@ void fill_hierarchy(float *truth, int k, tree *hierarchy)
}
}
-matrix load_regression_labels_paths(char **paths, int n, int k)
+dn_matrix load_regression_labels_paths(char **paths, int n, int k)
{
- matrix y = make_matrix(n, k);
+ dn_matrix y = make_matrix(n, k);
int i,j;
for(i = 0; i < n; ++i){
char labelpath[4096];
@@ -617,9 +617,9 @@ matrix load_regression_labels_paths(char **paths, int n, int k)
return y;
}
-matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
+dn_matrix load_labels_paths(char **paths, int n, char **labels, int k, dn_tree *hierarchy)
{
- matrix y = make_matrix(n, k);
+ dn_matrix y = make_matrix(n, k);
int i;
for(i = 0; i < n && labels; ++i){
fill_truth(paths[i], labels, k, y.vals[i]);
@@ -630,9 +630,9 @@ matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierar
return y;
}
-matrix load_tags_paths(char **paths, int n, int k)
+dn_matrix load_tags_paths(char **paths, int n, int k)
{
- matrix y = make_matrix(n, k);
+ dn_matrix y = make_matrix(n, k);
int i;
//int count = 0;
for(i = 0; i < n; ++i){
@@ -654,15 +654,15 @@ matrix load_tags_paths(char **paths, int n, int k)
return y;
}
-char **get_labels(char *filename)
+char **get_labels(const char *filename)
{
- list *plist = get_paths(filename);
+ dn_list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
-void free_data(data d)
+void free_data(dn_data d)
{
if(!d.shallow){
free_matrix(d.X);
@@ -673,7 +673,7 @@ void free_data(data d)
}
}
-image get_segmentation_image(char *path, int w, int h, int classes)
+dn_image get_segmentation_image(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
@@ -681,12 +681,12 @@ image get_segmentation_image(char *path, int w, int h, int classes)
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
- image mask = make_image(w, h, classes);
+ dn_image mask = make_image(w, h, classes);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
- image part = make_image(w, h, 1);
+ dn_image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
@@ -700,7 +700,7 @@ image get_segmentation_image(char *path, int w, int h, int classes)
return mask;
}
-image get_segmentation_image2(char *path, int w, int h, int classes)
+dn_image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
@@ -708,7 +708,7 @@ image get_segmentation_image2(char *path, int w, int h, int classes)
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
- image mask = make_image(w, h, classes+1);
+ dn_image mask = make_image(w, h, classes+1);
int i;
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
@@ -717,7 +717,7 @@ image get_segmentation_image2(char *path, int w, int h, int classes)
if(!file) file_error(labelpath);
char buff[32788];
int id;
- image part = make_image(w, h, 1);
+ dn_image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
@@ -734,11 +734,11 @@ image get_segmentation_image2(char *path, int w, int h, int classes)
return mask;
}
-data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
+dn_data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -751,18 +751,18 @@ data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int mi
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
- image orig = load_image_color(random_paths[i], 0, 0);
+ dn_image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
- image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+ dn_image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
- image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
+ dn_image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
- image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
+ dn_image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
@@ -782,11 +782,11 @@ data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int mi
return d;
}
-data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
+dn_data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -796,9 +796,9 @@ data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int b
d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);
for(i = 0; i < n; ++i){
- image orig = load_image_color(random_paths[i], 0, 0);
+ dn_image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
- image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+ dn_image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
@@ -822,11 +822,11 @@ data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int b
return d;
}
-data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
+dn_data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -836,9 +836,9 @@ data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int b
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
- image orig = load_image_color(random_paths[i], 0, 0);
+ dn_image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
- image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+ dn_image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
@@ -862,11 +862,11 @@ data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int b
return d;
}
-data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
+dn_data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -877,7 +877,7 @@ data load_data_region(int n, char **paths, int m, int w, int h, int size, int cl
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
- image orig = load_image_color(random_paths[i], 0, 0);
+ dn_image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
@@ -897,12 +897,12 @@ data load_data_region(int n, char **paths, int m, int w, int h, int size, int cl
float sy = (float)sheight / oh;
int flip = rand()%2;
- image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
+ dn_image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
- image sized = resize_image(cropped, w, h);
+ dn_image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
@@ -916,11 +916,11 @@ data load_data_region(int n, char **paths, int m, int w, int h, int size, int cl
return d;
}
-data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
+dn_data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
if(m) paths = get_random_paths(paths, 2*n, m);
int i,j;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -930,8 +930,8 @@ data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
int k = 2*(classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
- image im1 = load_image_color(paths[i*2], w, h);
- image im2 = load_image_color(paths[i*2+1], w, h);
+ dn_image im1 = load_image_color(paths[i*2], w, h);
+ dn_image im2 = load_image_color(paths[i*2+1], w, h);
d.X.vals[i] = calloc(d.X.cols, sizeof(float));
memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
@@ -980,16 +980,16 @@ data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
return d;
}
-data load_data_swag(char **paths, int n, int classes, float jitter)
+dn_data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
- image orig = load_image_color(random_path, 0, 0);
+ dn_image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
@@ -1016,12 +1016,12 @@ data load_data_swag(char **paths, int n, int classes, float jitter)
float sy = (float)sheight / h;
int flip = rand()%2;
- image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
+ dn_image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
- image sized = resize_image(cropped, w, h);
+ dn_image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
@@ -1033,11 +1033,11 @@ data load_data_swag(char **paths, int n, int classes, float jitter)
return d;
}
-data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
+dn_data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X.rows = n;
@@ -1046,8 +1046,8 @@ data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, in
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
- image orig = load_image_color(random_paths[i], 0, 0);
- image sized = make_image(w, h, orig.c);
+ dn_image orig = load_image_color(random_paths[i], 0, 0);
+ dn_image sized = make_image(w, h, orig.c);
fill_image(sized, .5);
float dw = jitter * orig.w;
@@ -1090,7 +1090,7 @@ data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, in
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand());
- load_args a = *(struct load_args*)ptr;
+ dn_load_args a = *(struct dn_load_args*)ptr;
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
@@ -1132,10 +1132,10 @@ void *load_thread(void *ptr)
return 0;
}
-pthread_t load_data_in_thread(load_args args)
+pthread_t load_data_in_thread(dn_load_args args)
{
pthread_t thread;
- struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ struct dn_load_args *ptr = calloc(1, sizeof(struct dn_load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
@@ -1144,12 +1144,12 @@ pthread_t load_data_in_thread(load_args args)
void *load_threads(void *ptr)
{
int i;
- load_args args = *(load_args *)ptr;
+ dn_load_args args = *(dn_load_args *)ptr;
if (args.threads == 0) args.threads = 1;
- data *out = args.d;
+ dn_data *out = args.d;
int total = args.n;
free(ptr);
- data *buffers = calloc(args.threads, sizeof(data));
+ dn_data *buffers = calloc(args.threads, sizeof(dn_data));
pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
@@ -1170,27 +1170,27 @@ void *load_threads(void *ptr)
return 0;
}
-void load_data_blocking(load_args args)
+void load_data_blocking(dn_load_args args)
{
- struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ struct dn_load_args *ptr = calloc(1, sizeof(struct dn_load_args));
*ptr = args;
load_thread(ptr);
}
-pthread_t load_data(load_args args)
+pthread_t load_data(dn_load_args args)
{
pthread_t thread;
- struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ struct dn_load_args *ptr = calloc(1, sizeof(struct dn_load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
return thread;
}
-data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
+dn_data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
@@ -1201,10 +1201,10 @@ data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int
return d;
}
-data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
+dn_data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_labels_paths(paths, n, labels, k, 0);
@@ -1226,10 +1226,10 @@ data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int
}
*/
-data load_data_super(char **paths, int n, int m, int w, int h, int scale)
+dn_data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
int i;
@@ -1242,11 +1242,11 @@ data load_data_super(char **paths, int n, int m, int w, int h, int scale)
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
- image im = load_image_color(paths[i], 0, 0);
- image crop = random_crop_image(im, w*scale, h*scale);
+ dn_image im = load_image_color(paths[i], 0, 0);
+ dn_image crop = random_crop_image(im, w*scale, h*scale);
int flip = rand()%2;
if (flip) flip_image(crop);
- image resize = resize_image(crop, w, h);
+ dn_image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data;
d.y.vals[i] = crop.data;
free_image(im);
@@ -1256,10 +1256,10 @@ data load_data_super(char **paths, int n, int m, int w, int h, int scale)
return d;
}
-data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
+dn_data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_regression_labels_paths(paths, n, k);
@@ -1267,9 +1267,9 @@ data load_data_regression(char **paths, int n, int m, int k, int min, int max, i
return d;
}
-data select_data(data *orig, int *inds)
+dn_data select_data(dn_data *orig, int *inds)
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 1;
d.w = orig[0].w;
d.h = orig[0].h;
@@ -1290,13 +1290,13 @@ data select_data(data *orig, int *inds)
return d;
}
-data *tile_data(data orig, int divs, int size)
+dn_data *tile_data(dn_data orig, int divs, int size)
{
- data *ds = calloc(divs*divs, sizeof(data));
+ dn_data *ds = calloc(divs*divs, sizeof(dn_data));
int i, j;
#pragma omp parallel for
for(i = 0; i < divs*divs; ++i){
- data d;
+ dn_data d;
d.shallow = 0;
d.w = orig.w/divs * size;
d.h = orig.h/divs * size;
@@ -1309,7 +1309,7 @@ data *tile_data(data orig, int divs, int size)
for(j = 0; j < orig.X.rows; ++j){
int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
- image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
+ dn_image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
}
ds[i] = d;
@@ -1317,9 +1317,9 @@ data *tile_data(data orig, int divs, int size)
return ds;
}
-data resize_data(data orig, int w, int h)
+dn_data resize_data(dn_data orig, int w, int h)
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
@@ -1331,16 +1331,16 @@ data resize_data(data orig, int w, int h)
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(i = 0; i < orig.X.rows; ++i){
- image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
+ dn_image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
d.X.vals[i] = resize_image(im, w, h).data;
}
return d;
}
-data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
+dn_data load_data_augment(char **paths, int n, int m, char **labels, int k, dn_tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.w=size;
d.h=size;
@@ -1350,10 +1350,10 @@ data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *h
return d;
}
-data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
+dn_data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
- data d = {0};
+ dn_data d = {0};
d.w = size;
d.h = size;
d.shallow = 0;
@@ -1363,10 +1363,10 @@ data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size
return d;
}
-matrix concat_matrix(matrix m1, matrix m2)
+dn_matrix concat_matrix(dn_matrix m1, dn_matrix m2)
{
int i, count = 0;
- matrix m;
+ dn_matrix m;
m.cols = m1.cols;
m.rows = m1.rows+m2.rows;
m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
@@ -1379,9 +1379,9 @@ matrix concat_matrix(matrix m1, matrix m2)
return m;
}
-data concat_data(data d1, data d2)
+dn_data concat_data(dn_data d1, dn_data d2)
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 1;
d.X = concat_matrix(d1.X, d2.X);
d.y = concat_matrix(d1.y, d2.y);
@@ -1390,26 +1390,26 @@ data concat_data(data d1, data d2)
return d;
}
-data concat_datas(data *d, int n)
+dn_data concat_datas(dn_data *d, int n)
{
int i;
- data out = {0};
+ dn_data out = {0};
for(i = 0; i < n; ++i){
- data new = concat_data(d[i], out);
+ dn_data new = concat_data(d[i], out);
free_data(out);
out = new;
}
return out;
}
-data load_categorical_data_csv(char *filename, int target, int k)
+dn_data load_categorical_data_csv(const char *filename, int target, int k)
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
- matrix X = csv_to_matrix(filename);
+ dn_matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
- matrix y;
+ dn_matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth;
@@ -1419,13 +1419,13 @@ data load_categorical_data_csv(char *filename, int target, int k)
return d;
}
-data load_cifar10_data(char *filename)
+dn_data load_cifar10_data(const char *filename)
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
long i,j;
- matrix X = make_matrix(10000, 3072);
- matrix y = make_matrix(10000, 10);
+ dn_matrix X = make_matrix(10000, 3072);
+ dn_matrix y = make_matrix(10000, 10);
d.X = X;
d.y = y;
@@ -1446,7 +1446,7 @@ data load_cifar10_data(char *filename)
return d;
}
-void get_random_batch(data d, int n, float *X, float *y)
+void get_random_batch(dn_data d, int n, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
@@ -1456,7 +1456,7 @@ void get_random_batch(data d, int n, float *X, float *y)
}
}
-void get_next_batch(data d, int n, int offset, float *X, float *y)
+void get_next_batch(dn_data d, int n, int offset, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
@@ -1466,7 +1466,7 @@ void get_next_batch(data d, int n, int offset, float *X, float *y)
}
}
-void smooth_data(data d)
+void smooth_data(dn_data d)
{
int i, j;
float scale = 1. / d.y.cols;
@@ -1478,13 +1478,13 @@ void smooth_data(data d)
}
}
-data load_all_cifar10()
+dn_data load_all_cifar10()
{
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
int i,j,b;
- matrix X = make_matrix(50000, 3072);
- matrix y = make_matrix(50000, 10);
+ dn_matrix X = make_matrix(50000, 3072);
+ dn_matrix y = make_matrix(50000, 10);
d.X = X;
d.y = y;
@@ -1511,11 +1511,11 @@ data load_all_cifar10()
return d;
}
-data load_go(char *filename)
+dn_data load_go(const char *filename)
{
FILE *fp = fopen(filename, "rb");
- matrix X = make_matrix(3363059, 361);
- matrix y = make_matrix(3363059, 361);
+ dn_matrix X = make_matrix(3363059, 361);
+ dn_matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
@@ -1546,7 +1546,7 @@ data load_go(char *filename)
X = resize_matrix(X, count);
y = resize_matrix(y, count);
- data d = {0};
+ dn_data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
@@ -1558,7 +1558,7 @@ data load_go(char *filename)
}
-void randomize_data(data d)
+void randomize_data(dn_data d)
{
int i;
for(i = d.X.rows-1; i > 0; --i){
@@ -1573,7 +1573,7 @@ void randomize_data(data d)
}
}
-void scale_data_rows(data d, float s)
+void scale_data_rows(dn_data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
@@ -1581,7 +1581,7 @@ void scale_data_rows(data d, float s)
}
}
-void translate_data_rows(data d, float s)
+void translate_data_rows(dn_data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
@@ -1589,9 +1589,9 @@ void translate_data_rows(data d, float s)
}
}
-data copy_data(data d)
+dn_data copy_data(dn_data d)
{
- data c = {0};
+ dn_data c = {0};
c.w = d.w;
c.h = d.h;
c.shallow = 0;
@@ -1602,7 +1602,7 @@ data copy_data(data d)
return c;
}
-void normalize_data_rows(data d)
+void normalize_data_rows(dn_data d)
{
int i;
for(i = 0; i < d.X.rows; ++i){
@@ -1610,9 +1610,9 @@ void normalize_data_rows(data d)
}
}
-data get_data_part(data d, int part, int total)
+dn_data get_data_part(dn_data d, int part, int total)
{
- data p = {0};
+ dn_data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
@@ -1623,9 +1623,9 @@ data get_data_part(data d, int part, int total)
return p;
}
-data get_random_data(data d, int num)
+dn_data get_random_data(dn_data d, int num)
{
- data r = {0};
+ dn_data r = {0};
r.shallow = 1;
r.X.rows = num;
@@ -1646,14 +1646,14 @@ data get_random_data(data d, int num)
return r;
}
-data *split_data(data d, int part, int total)
+dn_data *split_data(dn_data d, int part, int total)
{
- data *split = calloc(2, sizeof(data));
+ dn_data *split = calloc(2, sizeof(dn_data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
- data train;
- data test;
+ dn_data train;
+ dn_data test;
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
diff --git a/src/data.h b/src/data.h
index 781906f8743..5213fbe9824 100644
--- a/src/data.h
+++ b/src/data.h
@@ -18,33 +18,33 @@ static inline float distance_from_edge(int x, int max)
if (dist > 1) dist = 1;
return dist;
}
-void load_data_blocking(load_args args);
+void load_data_blocking(dn_load_args args);
void print_letters(float *pred, int n);
-data load_data_captcha(char **paths, int n, int m, int k, int w, int h);
-data load_data_captcha_encode(char **paths, int n, int m, int w, int h);
-data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure);
-data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
-matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
-data load_data_super(char **paths, int n, int m, int w, int h, int scale);
-data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
-data load_data_regression(char **paths, int n, int m, int classes, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
-data load_go(char *filename);
-
-
-data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h);
-
-void get_random_batch(data d, int n, float *X, float *y);
-data get_data_part(data d, int part, int total);
-data get_random_data(data d, int num);
-data load_categorical_data_csv(char *filename, int target, int k);
-void normalize_data_rows(data d);
-void scale_data_rows(data d, float s);
-void translate_data_rows(data d, float s);
-void randomize_data(data d);
-data *split_data(data d, int part, int total);
-data concat_datas(data *d, int n);
+dn_data load_data_captcha(char **paths, int n, int m, int k, int w, int h);
+dn_data load_data_captcha_encode(char **paths, int n, int m, int w, int h);
+dn_data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure);
+dn_data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
+dn_matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
+dn_data load_data_super(char **paths, int n, int m, int w, int h, int scale);
+dn_data load_data_augment(char **paths, int n, int m, char **labels, int k, dn_tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
+dn_data load_data_regression(char **paths, int n, int m, int classes, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
+dn_data load_go(const char *filename);
+
+
+dn_data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h);
+
+void get_random_batch(dn_data d, int n, float *X, float *y);
+dn_data get_data_part(dn_data d, int part, int total);
+dn_data get_random_data(dn_data d, int num);
+dn_data load_categorical_data_csv(const char *filename, int target, int k);
+void normalize_data_rows(dn_data d);
+void scale_data_rows(dn_data d, float s);
+void translate_data_rows(dn_data d, float s);
+void randomize_data(dn_data d);
+dn_data *split_data(dn_data d, int part, int total);
+dn_data concat_datas(dn_data *d, int n);
void fill_truth(char *path, char **labels, int k, float *truth);
#endif
diff --git a/src/deconvolutional_layer.c b/src/deconvolutional_layer.c
index 00c0e85771d..c06ec359fb0 100644
--- a/src/deconvolutional_layer.c
+++ b/src/deconvolutional_layer.c
@@ -11,11 +11,11 @@
#include
-static size_t get_workspace_size(layer l){
+static size_t get_workspace_size(dn_layer l){
return (size_t)l.h*l.w*l.size*l.size*l.n*sizeof(float);
}
-void bilinear_init(layer l)
+void bilinear_init(dn_layer l)
{
int i,j,f;
float center = (l.size-1) / 2.;
@@ -32,10 +32,10 @@ void bilinear_init(layer l)
}
-layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam)
+dn_layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam)
{
int i;
- layer l = {0};
+ dn_layer l = {0};
l.type = DECONVOLUTIONAL;
l.h = h;
@@ -165,7 +165,7 @@ layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size
return l;
}
-void denormalize_deconvolutional_layer(layer l)
+void denormalize_deconvolutional_layer(dn_layer l)
{
int i, j;
for(i = 0; i < l.n; ++i){
@@ -180,7 +180,7 @@ void denormalize_deconvolutional_layer(layer l)
}
}
-void resize_deconvolutional_layer(layer *l, int h, int w)
+void resize_deconvolutional_layer(dn_layer *l, int h, int w)
{
l->h = h;
l->w = w;
@@ -219,7 +219,7 @@ void resize_deconvolutional_layer(layer *l, int h, int w)
l->workspace_size = get_workspace_size(*l);
}
-void forward_deconvolutional_layer(const layer l, network net)
+void forward_deconvolutional_layer(const dn_layer l, dn_network net)
{
int i;
@@ -246,7 +246,7 @@ void forward_deconvolutional_layer(const layer l, network net)
activate_array(l.output, l.batch*l.n*l.out_w*l.out_h, l.activation);
}
-void backward_deconvolutional_layer(layer l, network net)
+void backward_deconvolutional_layer(dn_layer l, dn_network net)
{
int i;
@@ -287,7 +287,7 @@ void backward_deconvolutional_layer(layer l, network net)
}
}
-void update_deconvolutional_layer(layer l, update_args a)
+void update_deconvolutional_layer(dn_layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
diff --git a/src/deconvolutional_layer.h b/src/deconvolutional_layer.h
index b254fb91e69..a8b783b5cce 100644
--- a/src/deconvolutional_layer.h
+++ b/src/deconvolutional_layer.h
@@ -15,11 +15,11 @@ void push_deconvolutional_layer(layer l);
void pull_deconvolutional_layer(layer l);
#endif
-layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam);
-void resize_deconvolutional_layer(layer *l, int h, int w);
-void forward_deconvolutional_layer(const layer l, network net);
-void update_deconvolutional_layer(layer l, update_args a);
-void backward_deconvolutional_layer(layer l, network net);
+dn_layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam);
+void resize_deconvolutional_layer(dn_layer *l, int h, int w);
+void forward_deconvolutional_layer(const dn_layer l, dn_network net);
+void update_deconvolutional_layer(dn_layer l, update_args a);
+void backward_deconvolutional_layer(dn_layer l, dn_network net);
#endif
diff --git a/src/detection_layer.c b/src/detection_layer.c
index d0e0194b1db..90e305e4ac3 100644
--- a/src/detection_layer.c
+++ b/src/detection_layer.c
@@ -47,7 +47,7 @@ detection_layer make_detection_layer(int batch, int inputs, int n, int side, int
return l;
}
-void forward_detection_layer(const detection_layer l, network net)
+void forward_detection_layer(const detection_layer l, dn_network net)
{
int locations = l.side*l.side;
int i,j;
@@ -102,13 +102,13 @@ void forward_detection_layer(const detection_layer l, network net)
avg_allcat += l.output[class_index+j];
}
- box truth = float_to_box(net.truth + truth_index + 1 + l.classes, 1);
+ dn_box truth = float_to_box(net.truth + truth_index + 1 + l.classes, 1);
truth.x /= l.side;
truth.y /= l.side;
for(j = 0; j < l.n; ++j){
int box_index = index + locations*(l.classes + l.n) + (i*l.n + j) * l.coords;
- box out = float_to_box(l.output + box_index, 1);
+ dn_box out = float_to_box(l.output + box_index, 1);
out.x /= l.side;
out.y /= l.side;
@@ -147,7 +147,7 @@ void forward_detection_layer(const detection_layer l, network net)
int box_index = index + locations*(l.classes + l.n) + (i*l.n + best_index) * l.coords;
int tbox_index = truth_index + 1 + l.classes;
- box out = float_to_box(l.output + box_index, 1);
+ dn_box out = float_to_box(l.output + box_index, 1);
out.x /= l.side;
out.y /= l.side;
if (l.sqrt) {
@@ -217,12 +217,12 @@ void forward_detection_layer(const detection_layer l, network net)
}
}
-void backward_detection_layer(const detection_layer l, network net)
+void backward_detection_layer(const detection_layer l, dn_network net)
{
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
}
-void get_detection_detections(layer l, int w, int h, float thresh, detection *dets)
+void get_detection_detections(dn_layer l, int w, int h, float thresh, detection *dets)
{
int i,j,n;
float *predictions = l.output;
@@ -235,7 +235,7 @@ void get_detection_detections(layer l, int w, int h, float thresh, detection *dets)
int p_index = l.side*l.side*l.classes + i*l.n + n;
float scale = predictions[p_index];
int box_index = l.side*l.side*(l.classes + l.n) + (i*l.n + n)*4;
- box b;
+ dn_box b;
b.x = (predictions[box_index + 0] + col) / l.side * w;
b.y = (predictions[box_index + 1] + row) / l.side * h;
b.w = pow(predictions[box_index + 2], (l.sqrt?2:1)) * w;
diff --git a/src/detection_layer.h b/src/detection_layer.h
index 1c818535700..356b36a3b85 100644
--- a/src/detection_layer.h
+++ b/src/detection_layer.h
@@ -4,11 +4,11 @@
#include "layer.h"
#include "network.h"
-typedef layer detection_layer;
+typedef dn_layer detection_layer;
detection_layer make_detection_layer(int batch, int inputs, int n, int size, int classes, int coords, int rescore);
-void forward_detection_layer(const detection_layer l, network net);
-void backward_detection_layer(const detection_layer l, network net);
+void forward_detection_layer(const detection_layer l, dn_network net);
+void backward_detection_layer(const detection_layer l, dn_network net);
#ifdef GPU
void forward_detection_layer_gpu(const detection_layer l, network net);
diff --git a/src/dropout_layer.c b/src/dropout_layer.c
index 780554fb371..12cc8df7c69 100644
--- a/src/dropout_layer.c
+++ b/src/dropout_layer.c
@@ -35,7 +35,7 @@ void resize_dropout_layer(dropout_layer *l, int inputs)
#endif
}
-void forward_dropout_layer(dropout_layer l, network net)
+void forward_dropout_layer(dropout_layer l, dn_network net)
{
int i;
if (!net.train) return;
@@ -47,7 +47,7 @@ void forward_dropout_layer(dropout_layer l, network net)
}
}
-void backward_dropout_layer(dropout_layer l, network net)
+void backward_dropout_layer(dropout_layer l, dn_network net)
{
int i;
if(!net.delta) return;
diff --git a/src/dropout_layer.h b/src/dropout_layer.h
index 01f94d4d7d1..fff3ca01a4a 100644
--- a/src/dropout_layer.h
+++ b/src/dropout_layer.h
@@ -4,12 +4,12 @@
#include "layer.h"
#include "network.h"
-typedef layer dropout_layer;
+typedef dn_layer dropout_layer;
dropout_layer make_dropout_layer(int batch, int inputs, float probability);
-void forward_dropout_layer(dropout_layer l, network net);
-void backward_dropout_layer(dropout_layer l, network net);
+void forward_dropout_layer(dropout_layer l, dn_network net);
+void backward_dropout_layer(dropout_layer l, dn_network net);
void resize_dropout_layer(dropout_layer *l, int inputs);
#ifdef GPU
diff --git a/src/gemm.c b/src/gemm.c
index 648027f2cdf..6024b39afe7 100644
--- a/src/gemm.c
+++ b/src/gemm.c
@@ -5,164 +5,169 @@
#include <stdio.h>
#include <math.h>
-void gemm_bin(int M, int N, int K, float ALPHA,
- char *A, int lda,
- float *B, int ldb,
- float *C, int ldc)
+void gemm_bin(int M, int N, int K, float ALPHA,
+ char *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
{
- int i,j,k;
- for(i = 0; i < M; ++i){
- for(k = 0; k < K; ++k){
- char A_PART = A[i*lda+k];
- if(A_PART){
- for(j = 0; j < N; ++j){
- C[i*ldc+j] += B[k*ldb+j];
- }
- } else {
- for(j = 0; j < N; ++j){
- C[i*ldc+j] -= B[k*ldb+j];
- }
+ int i,j,k;
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ char A_PART = A[i*lda+k];
+ if(A_PART){
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += B[k*ldb+j];
}
- }
- }
+ } else {
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] -= B[k*ldb+j];
+ }
+ }
+ }
+ }
}
float *random_matrix(int rows, int cols)
{
- int i;
- float *m = calloc(rows*cols, sizeof(float));
- for(i = 0; i < rows*cols; ++i){
- m[i] = (float)rand()/RAND_MAX;
- }
- return m;
+ int i;
+ float *m = calloc(rows*cols, sizeof(float));
+ for(i = 0; i < rows*cols; ++i){
+ m[i] = (float)rand()/RAND_MAX;
+ }
+ return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
- float *a;
- if(!TA) a = random_matrix(m,k);
- else a = random_matrix(k,m);
- int lda = (!TA)?k:m;
- float *b;
- if(!TB) b = random_matrix(k,n);
- else b = random_matrix(n,k);
- int ldb = (!TB)?n:k;
-
- float *c = random_matrix(m,n);
- int i;
- clock_t start = clock(), end;
- for(i = 0; i<10; ++i){
- gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
- }
- end = clock();
- printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
- free(a);
- free(b);
- free(c);
+ float *a;
+ if(!TA) a = random_matrix(m,k);
+ else a = random_matrix(k,m);
+ int lda = (!TA)?k:m;
+ float *b;
+ if(!TB) b = random_matrix(k,n);
+ else b = random_matrix(n,k);
+ int ldb = (!TB)?n:k;
+
+ float *c = random_matrix(m,n);
+ int i;
+ clock_t start = clock(), end;
+ for(i = 0; i<10; ++i){
+ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
+ }
+ end = clock();
+ printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
+ free(a);
+ free(b);
+ free(c);
}
-void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float BETA,
- float *C, int ldc)
+void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float BETA,
+ float *C, int ldc)
{
- gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
+ gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
-void gemm_nn(int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float *C, int ldc)
+void gemm_nn(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
{
- int i,j,k;
- #pragma omp parallel for
- for(i = 0; i < M; ++i){
- for(k = 0; k < K; ++k){
- register float A_PART = ALPHA*A[i*lda+k];
- for(j = 0; j < N; ++j){
- C[i*ldc+j] += A_PART*B[k*ldb+j];
+ int i,j,k;
+#pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ register float A_PART = ALPHA*A[i*lda+k];
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += A_PART*B[k*ldb+j];
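+                // debug: print A, B (and B scaled to 0-255) plus the accumulated C value for the single output element i==0, j==26688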
+ if (i==0 && j==26688)
+ {
+ unsigned short p = B[k*ldb+j]*255;
+ fprintf(stderr,"j=%d, A=%f, B=%f (0x%02x), C=%f\n",j, A_PART, B[k*ldb+j],p,C[i*ldc+j]);
}
- }
- }
+ }
+ }
+ }
}
-void gemm_nt(int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float *C, int ldc)
+void gemm_nt(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
{
- int i,j,k;
- #pragma omp parallel for
- for(i = 0; i < M; ++i){
- for(j = 0; j < N; ++j){
- register float sum = 0;
- for(k = 0; k < K; ++k){
- sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
- }
- C[i*ldc+j] += sum;
- }
- }
+ int i,j,k;
+#pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ register float sum = 0;
+ for(k = 0; k < K; ++k){
+ sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
+ }
+ C[i*ldc+j] += sum;
+ }
+ }
}
-void gemm_tn(int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float *C, int ldc)
+void gemm_tn(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
{
- int i,j,k;
- #pragma omp parallel for
- for(i = 0; i < M; ++i){
- for(k = 0; k < K; ++k){
- register float A_PART = ALPHA*A[k*lda+i];
- for(j = 0; j < N; ++j){
- C[i*ldc+j] += A_PART*B[k*ldb+j];
- }
- }
- }
+ int i,j,k;
+#pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ register float A_PART = ALPHA*A[k*lda+i];
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += A_PART*B[k*ldb+j];
+ }
+ }
+ }
}
-void gemm_tt(int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float *C, int ldc)
+void gemm_tt(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
{
- int i,j,k;
- #pragma omp parallel for
- for(i = 0; i < M; ++i){
- for(j = 0; j < N; ++j){
- register float sum = 0;
- for(k = 0; k < K; ++k){
- sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
- }
- C[i*ldc+j] += sum;
- }
- }
+ int i,j,k;
+#pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ register float sum = 0;
+ for(k = 0; k < K; ++k){
+ sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
+ }
+ C[i*ldc+j] += sum;
+ }
+ }
}
-void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
- float *A, int lda,
- float *B, int ldb,
- float BETA,
- float *C, int ldc)
+void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float BETA,
+ float *C, int ldc)
{
- //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
- int i, j;
- for(i = 0; i < M; ++i){
- for(j = 0; j < N; ++j){
- C[i*ldc + j] *= BETA;
- }
- }
- if(!TA && !TB)
- gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
- else if(TA && !TB)
- gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
- else if(!TA && TB)
- gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
- else
- gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
+ int i, j;
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ C[i*ldc + j] *= BETA;
+ }
+ }
+ if(!TA && !TB)
+ gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else if(TA && !TB)
+ gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else if(!TA && TB)
+ gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else
+ gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
#ifdef GPU
diff --git a/src/gru_layer.c b/src/gru_layer.c
index b6601d80e0b..dcd97caf888 100644
--- a/src/gru_layer.c
+++ b/src/gru_layer.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
#include <string.h>
-static void increment_layer(layer *l, int steps)
+static void increment_layer(dn_layer *l, int steps)
{
int num = l->outputs*l->batch*steps;
l->output += num;
@@ -26,44 +26,44 @@ static void increment_layer(layer *l, int steps)
#endif
}
-layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
+dn_layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
{
fprintf(stderr, "GRU Layer: %d inputs, %d outputs\n", inputs, outputs);
batch = batch / steps;
- layer l = {0};
+ dn_layer l = {0};
l.batch = batch;
l.type = GRU;
l.steps = steps;
l.inputs = inputs;
- l.uz = malloc(sizeof(layer));
+ l.uz = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.uz) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.uz->batch = batch;
- l.wz = malloc(sizeof(layer));
+ l.wz = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wz) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wz->batch = batch;
- l.ur = malloc(sizeof(layer));
+ l.ur = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.ur) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.ur->batch = batch;
- l.wr = malloc(sizeof(layer));
+ l.wr = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wr) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wr->batch = batch;
- l.uh = malloc(sizeof(layer));
+ l.uh = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.uh) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.uh->batch = batch;
- l.wh = malloc(sizeof(layer));
+ l.wh = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wh) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wh->batch = batch;
@@ -115,7 +115,7 @@ layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
return l;
}
-void update_gru_layer(layer l, update_args a)
+void update_gru_layer(dn_layer l, update_args a)
{
update_connected_layer(*(l.ur), a);
update_connected_layer(*(l.uz), a);
@@ -125,18 +125,18 @@ void update_gru_layer(layer l, update_args a)
update_connected_layer(*(l.wh), a);
}
-void forward_gru_layer(layer l, network net)
+void forward_gru_layer(dn_layer l, dn_network net)
{
- network s = net;
+ dn_network s = net;
s.train = net.train;
int i;
- layer uz = *(l.uz);
- layer ur = *(l.ur);
- layer uh = *(l.uh);
+ dn_layer uz = *(l.uz);
+ dn_layer ur = *(l.ur);
+ dn_layer uh = *(l.uh);
- layer wz = *(l.wz);
- layer wr = *(l.wr);
- layer wh = *(l.wh);
+ dn_layer wz = *(l.wz);
+ dn_layer wr = *(l.wr);
+ dn_layer wh = *(l.wh);
fill_cpu(l.outputs * l.batch * l.steps, 0, uz.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, ur.delta, 1);
@@ -201,7 +201,7 @@ void forward_gru_layer(layer l, network net)
}
}
-void backward_gru_layer(layer l, network net)
+void backward_gru_layer(dn_layer l, dn_network net)
{
}
diff --git a/src/gru_layer.h b/src/gru_layer.h
index 9067942e949..55bf25069f7 100644
--- a/src/gru_layer.h
+++ b/src/gru_layer.h
@@ -6,11 +6,11 @@
#include "layer.h"
#include "network.h"
-layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
+dn_layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
-void forward_gru_layer(layer l, network state);
-void backward_gru_layer(layer l, network state);
-void update_gru_layer(layer l, update_args a);
+void forward_gru_layer(dn_layer l, dn_network state);
+void backward_gru_layer(dn_layer l, dn_network state);
+void update_gru_layer(dn_layer l, update_args a);
#ifdef GPU
void forward_gru_layer_gpu(layer l, network state);
diff --git a/src/image.c b/src/image.c
index 4a2c6baf0ad..0eebde7d14b 100644
--- a/src/image.c
+++ b/src/image.c
@@ -16,1256 +16,1256 @@ float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
float get_color(int c, int x, int max)
{
- float ratio = ((float)x/max)*5;
- int i = floor(ratio);
- int j = ceil(ratio);
- ratio -= i;
- float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
- //printf("%f\n", r);
- return r;
-}
-
-image mask_to_rgb(image mask)
-{
- int n = mask.c;
- image im = make_image(mask.w, mask.h, 3);
- int i, j;
- for(j = 0; j < n; ++j){
- int offset = j*123457 % n;
- float red = get_color(2,offset,n);
- float green = get_color(1,offset,n);
- float blue = get_color(0,offset,n);
- for(i = 0; i < im.w*im.h; ++i){
- im.data[i + 0*im.w*im.h] += mask.data[j*im.h*im.w + i]*red;
- im.data[i + 1*im.w*im.h] += mask.data[j*im.h*im.w + i]*green;
- im.data[i + 2*im.w*im.h] += mask.data[j*im.h*im.w + i]*blue;
- }
- }
- return im;
+ float ratio = ((float)x/max)*5;
+ int i = floor(ratio);
+ int j = ceil(ratio);
+ ratio -= i;
+ float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
+ //printf("%f\n", r);
+ return r;
}
-static float get_pixel(image m, int x, int y, int c)
+dn_image mask_to_rgb(dn_image mask)
{
- assert(x < m.w && y < m.h && c < m.c);
- return m.data[c*m.h*m.w + y*m.w + x];
-}
-static float get_pixel_extend(image m, int x, int y, int c)
+ int n = mask.c;
+ dn_image im = make_image(mask.w, mask.h, 3);
+ int i, j;
+ for(j = 0; j < n; ++j){
+ int offset = j*123457 % n;
+ float red = get_color(2,offset,n);
+ float green = get_color(1,offset,n);
+ float blue = get_color(0,offset,n);
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i + 0*im.w*im.h] += mask.data[j*im.h*im.w + i]*red;
+ im.data[i + 1*im.w*im.h] += mask.data[j*im.h*im.w + i]*green;
+ im.data[i + 2*im.w*im.h] += mask.data[j*im.h*im.w + i]*blue;
+ }
+ }
+ return im;
+}
+
+static float get_pixel(dn_image m, int x, int y, int c)
{
- if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
- /*
- if(x < 0) x = 0;
- if(x >= m.w) x = m.w-1;
- if(y < 0) y = 0;
- if(y >= m.h) y = m.h-1;
- */
- if(c < 0 || c >= m.c) return 0;
- return get_pixel(m, x, y, c);
+ assert(x < m.w && y < m.h && c < m.c);
+ return m.data[c*m.h*m.w + y*m.w + x];
}
-static void set_pixel(image m, int x, int y, int c, float val)
+static float get_pixel_extend(dn_image m, int x, int y, int c)
{
- if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
- assert(x < m.w && y < m.h && c < m.c);
- m.data[c*m.h*m.w + y*m.w + x] = val;
-}
-static void add_pixel(image m, int x, int y, int c, float val)
+ if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
+ /*
+ if(x < 0) x = 0;
+ if(x >= m.w) x = m.w-1;
+ if(y < 0) y = 0;
+ if(y >= m.h) y = m.h-1;
+ */
+ if(c < 0 || c >= m.c) return 0;
+ return get_pixel(m, x, y, c);
+}
+static void set_pixel(dn_image m, int x, int y, int c, float val)
{
- assert(x < m.w && y < m.h && c < m.c);
- m.data[c*m.h*m.w + y*m.w + x] += val;
+ if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
+ assert(x < m.w && y < m.h && c < m.c);
+ m.data[c*m.h*m.w + y*m.w + x] = val;
}
-
-static float bilinear_interpolate(image im, float x, float y, int c)
-{
- int ix = (int) floorf(x);
- int iy = (int) floorf(y);
-
- float dx = x - ix;
- float dy = y - iy;
-
- float val = (1-dy) * (1-dx) * get_pixel_extend(im, ix, iy, c) +
- dy * (1-dx) * get_pixel_extend(im, ix, iy+1, c) +
- (1-dy) * dx * get_pixel_extend(im, ix+1, iy, c) +
- dy * dx * get_pixel_extend(im, ix+1, iy+1, c);
- return val;
+static void add_pixel(dn_image m, int x, int y, int c, float val)
+{
+ assert(x < m.w && y < m.h && c < m.c);
+ m.data[c*m.h*m.w + y*m.w + x] += val;
+}
+
+static float bilinear_interpolate(dn_image im, float x, float y, int c)
+{
+ int ix = (int) floorf(x);
+ int iy = (int) floorf(y);
+
+ float dx = x - ix;
+ float dy = y - iy;
+
+ float val = (1-dy) * (1-dx) * get_pixel_extend(im, ix, iy, c) +
+ dy * (1-dx) * get_pixel_extend(im, ix, iy+1, c) +
+ (1-dy) * dx * get_pixel_extend(im, ix+1, iy, c) +
+ dy * dx * get_pixel_extend(im, ix+1, iy+1, c);
+ return val;
+}
+
+
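+// multiply each pixel of source into the corresponding pixel of dest at offset (dx, dy)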
+void composite_image(dn_image source, dn_image dest, int dx, int dy)
+{
+ int x,y,k;
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float val = get_pixel(source, x, y, k);
+ float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
+ set_pixel(dest, dx+x, dy+y, k, val * val2);
+ }
+ }
+ }
}
-
-void composite_image(image source, image dest, int dx, int dy)
-{
- int x,y,k;
- for(k = 0; k < source.c; ++k){
- for(y = 0; y < source.h; ++y){
- for(x = 0; x < source.w; ++x){
- float val = get_pixel(source, x, y, k);
- float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
- set_pixel(dest, dx+x, dy+y, k, val * val2);
- }
- }
- }
+dn_image border_image(dn_image a, int border)
+{
+ dn_image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
+ int x,y,k;
+ for(k = 0; k < b.c; ++k){
+ for(y = 0; y < b.h; ++y){
+ for(x = 0; x < b.w; ++x){
+ float val = get_pixel_extend(a, x - border, y - border, k);
+ if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
+ set_pixel(b, x, y, k, val);
+ }
+ }
+ }
+ return b;
}
-image border_image(image a, int border)
+dn_image tile_images(dn_image a, dn_image b, int dx)
{
- image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
- int x,y,k;
- for(k = 0; k < b.c; ++k){
- for(y = 0; y < b.h; ++y){
- for(x = 0; x < b.w; ++x){
- float val = get_pixel_extend(a, x - border, y - border, k);
- if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
- set_pixel(b, x, y, k, val);
- }
- }
- }
- return b;
+ if(a.w == 0) return copy_image(b);
+ dn_image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
+ fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
+ embed_image(a, c, 0, 0);
+ composite_image(b, c, a.w + dx, 0);
+ return c;
}
-image tile_images(image a, image b, int dx)
+dn_image get_label(dn_image **characters, char *string, int size)
{
- if(a.w == 0) return copy_image(b);
- image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
- fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
- embed_image(a, c, 0, 0);
- composite_image(b, c, a.w + dx, 0);
- return c;
+ size = size/10;
+ if(size > 7) size = 7;
+ dn_image label = make_empty_image(0,0,0);
+ while(*string){
+ dn_image l = characters[size][(int)*string];
+ dn_image n = tile_images(label, l, -size - 1 + (size+1)/2);
+ free_image(label);
+ label = n;
+ ++string;
+ }
+ dn_image b = border_image(label, label.h*.25);
+ free_image(label);
+ return b;
}
-image get_label(image **characters, char *string, int size)
+void draw_label(dn_image a, int r, int c, dn_image label, const float *rgb)
{
- size = size/10;
- if(size > 7) size = 7;
- image label = make_empty_image(0,0,0);
- while(*string){
- image l = characters[size][(int)*string];
- image n = tile_images(label, l, -size - 1 + (size+1)/2);
- free_image(label);
- label = n;
- ++string;
- }
- image b = border_image(label, label.h*.25);
- free_image(label);
- return b;
-}
-
-void draw_label(image a, int r, int c, image label, const float *rgb)
-{
- int w = label.w;
- int h = label.h;
- if (r - h >= 0) r = r - h;
+ int w = label.w;
+ int h = label.h;
+ if (r - h >= 0) r = r - h;
- int i, j, k;
- for(j = 0; j < h && j + r < a.h; ++j){
- for(i = 0; i < w && i + c < a.w; ++i){
- for(k = 0; k < label.c; ++k){
- float val = get_pixel(label, i, j, k);
- set_pixel(a, i+c, j+r, k, rgb[k] * val);
- }
- }
- }
+ int i, j, k;
+ for(j = 0; j < h && j + r < a.h; ++j){
+ for(i = 0; i < w && i + c < a.w; ++i){
+ for(k = 0; k < label.c; ++k){
+ float val = get_pixel(label, i, j, k);
+ set_pixel(a, i+c, j+r, k, rgb[k] * val);
+ }
+ }
+ }
}
-void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
+void draw_box(dn_image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
- //normalize_image(a);
- int i;
- if(x1 < 0) x1 = 0;
- if(x1 >= a.w) x1 = a.w-1;
- if(x2 < 0) x2 = 0;
- if(x2 >= a.w) x2 = a.w-1;
-
- if(y1 < 0) y1 = 0;
- if(y1 >= a.h) y1 = a.h-1;
- if(y2 < 0) y2 = 0;
- if(y2 >= a.h) y2 = a.h-1;
+ //normalize_image(a);
+ int i;
+ if(x1 < 0) x1 = 0;
+ if(x1 >= a.w) x1 = a.w-1;
+ if(x2 < 0) x2 = 0;
+ if(x2 >= a.w) x2 = a.w-1;
- for(i = x1; i <= x2; ++i){
- a.data[i + y1*a.w + 0*a.w*a.h] = r;
- a.data[i + y2*a.w + 0*a.w*a.h] = r;
-
- a.data[i + y1*a.w + 1*a.w*a.h] = g;
- a.data[i + y2*a.w + 1*a.w*a.h] = g;
-
- a.data[i + y1*a.w + 2*a.w*a.h] = b;
- a.data[i + y2*a.w + 2*a.w*a.h] = b;
- }
- for(i = y1; i <= y2; ++i){
- a.data[x1 + i*a.w + 0*a.w*a.h] = r;
- a.data[x2 + i*a.w + 0*a.w*a.h] = r;
-
- a.data[x1 + i*a.w + 1*a.w*a.h] = g;
- a.data[x2 + i*a.w + 1*a.w*a.h] = g;
-
- a.data[x1 + i*a.w + 2*a.w*a.h] = b;
- a.data[x2 + i*a.w + 2*a.w*a.h] = b;
- }
-}
+ if(y1 < 0) y1 = 0;
+ if(y1 >= a.h) y1 = a.h-1;
+ if(y2 < 0) y2 = 0;
+ if(y2 >= a.h) y2 = a.h-1;
+
+ for(i = x1; i <= x2; ++i){
+ a.data[i + y1*a.w + 0*a.w*a.h] = r;
+ a.data[i + y2*a.w + 0*a.w*a.h] = r;
+
+ a.data[i + y1*a.w + 1*a.w*a.h] = g;
+ a.data[i + y2*a.w + 1*a.w*a.h] = g;
+
+ a.data[i + y1*a.w + 2*a.w*a.h] = b;
+ a.data[i + y2*a.w + 2*a.w*a.h] = b;
+ }
+ for(i = y1; i <= y2; ++i){
+ a.data[x1 + i*a.w + 0*a.w*a.h] = r;
+ a.data[x2 + i*a.w + 0*a.w*a.h] = r;
+
+ a.data[x1 + i*a.w + 1*a.w*a.h] = g;
+ a.data[x2 + i*a.w + 1*a.w*a.h] = g;
+
+ a.data[x1 + i*a.w + 2*a.w*a.h] = b;
+ a.data[x2 + i*a.w + 2*a.w*a.h] = b;
+ }
+}
+
+void draw_box_width(dn_image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
+{
+ int i;
+ for(i = 0; i < w; ++i){
+ draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
+ }
+}
+
+void draw_bbox(dn_image a, dn_box bbox, int w, float r, float g, float b)
+{
+ int left = (bbox.x-bbox.w/2)*a.w;
+ int right = (bbox.x+bbox.w/2)*a.w;
+ int top = (bbox.y-bbox.h/2)*a.h;
+ int bot = (bbox.y+bbox.h/2)*a.h;
-void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
-{
- int i;
- for(i = 0; i < w; ++i){
- draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
- }
-}
-
-void draw_bbox(image a, box bbox, int w, float r, float g, float b)
-{
- int left = (bbox.x-bbox.w/2)*a.w;
- int right = (bbox.x+bbox.w/2)*a.w;
- int top = (bbox.y-bbox.h/2)*a.h;
- int bot = (bbox.y+bbox.h/2)*a.h;
-
- int i;
- for(i = 0; i < w; ++i){
- draw_box(a, left+i, top+i, right-i, bot-i, r, g, b);
- }
-}
-
-image **load_alphabet()
-{
- int i, j;
- const int nsize = 8;
- image **alphabets = calloc(nsize, sizeof(image));
- for(j = 0; j < nsize; ++j){
- alphabets[j] = calloc(128, sizeof(image));
- for(i = 32; i < 127; ++i){
- char buff[256];
- sprintf(buff, "data/labels/%d_%d.png", i, j);
- alphabets[j][i] = load_image_color(buff, 0, 0);
- }
- }
- return alphabets;
-}
-
-void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
-{
- int i,j;
-
- for(i = 0; i < num; ++i){
- char labelstr[4096] = {0};
- int class = -1;
- for(j = 0; j < classes; ++j){
- if (dets[i].prob[j] > thresh){
- if (class < 0) {
- strcat(labelstr, names[j]);
- class = j;
- } else {
- strcat(labelstr, ", ");
- strcat(labelstr, names[j]);
- }
- printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
- }
- }
- if(class >= 0){
- int width = im.h * .006;
-
- /*
- if(0){
- width = pow(prob, 1./2.)*10+1;
- alphabet = 0;
- }
- */
-
- //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
- int offset = class*123457 % classes;
- float red = get_color(2,offset,classes);
- float green = get_color(1,offset,classes);
- float blue = get_color(0,offset,classes);
- float rgb[3];
-
- //width = prob*20+2;
-
- rgb[0] = red;
- rgb[1] = green;
- rgb[2] = blue;
- box b = dets[i].bbox;
- //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
-
- int left = (b.x-b.w/2.)*im.w;
- int right = (b.x+b.w/2.)*im.w;
- int top = (b.y-b.h/2.)*im.h;
- int bot = (b.y+b.h/2.)*im.h;
-
- if(left < 0) left = 0;
- if(right > im.w-1) right = im.w-1;
- if(top < 0) top = 0;
- if(bot > im.h-1) bot = im.h-1;
-
- draw_box_width(im, left, top, right, bot, width, red, green, blue);
- if (alphabet) {
- image label = get_label(alphabet, labelstr, (im.h*.03));
- draw_label(im, top + width, left, label, rgb);
- free_image(label);
- }
- if (dets[i].mask){
- image mask = float_to_image(14, 14, 1, dets[i].mask);
- image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
- image tmask = threshold_image(resized_mask, .5);
- embed_image(tmask, im, left, top);
- free_image(mask);
- free_image(resized_mask);
- free_image(tmask);
- }
- }
- }
-}
-
-void transpose_image(image im)
-{
- assert(im.w == im.h);
- int n, m;
- int c;
- for(c = 0; c < im.c; ++c){
- for(n = 0; n < im.w-1; ++n){
- for(m = n + 1; m < im.w; ++m){
- float swap = im.data[m + im.w*(n + im.h*c)];
- im.data[m + im.w*(n + im.h*c)] = im.data[n + im.w*(m + im.h*c)];
- im.data[n + im.w*(m + im.h*c)] = swap;
- }
- }
- }
-}
-
-void rotate_image_cw(image im, int times)
-{
- assert(im.w == im.h);
- times = (times + 400) % 4;
- int i, x, y, c;
- int n = im.w;
- for(i = 0; i < times; ++i){
- for(c = 0; c < im.c; ++c){
- for(x = 0; x < n/2; ++x){
- for(y = 0; y < (n-1)/2 + 1; ++y){
- float temp = im.data[y + im.w*(x + im.h*c)];
- im.data[y + im.w*(x + im.h*c)] = im.data[n-1-x + im.w*(y + im.h*c)];
- im.data[n-1-x + im.w*(y + im.h*c)] = im.data[n-1-y + im.w*(n-1-x + im.h*c)];
- im.data[n-1-y + im.w*(n-1-x + im.h*c)] = im.data[x + im.w*(n-1-y + im.h*c)];
- im.data[x + im.w*(n-1-y + im.h*c)] = temp;
- }
- }
- }
- }
-}
-
-void flip_image(image a)
-{
- int i,j,k;
- for(k = 0; k < a.c; ++k){
- for(i = 0; i < a.h; ++i){
- for(j = 0; j < a.w/2; ++j){
- int index = j + a.w*(i + a.h*(k));
- int flip = (a.w - j - 1) + a.w*(i + a.h*(k));
- float swap = a.data[flip];
- a.data[flip] = a.data[index];
- a.data[index] = swap;
- }
- }
- }
+ int i;
+ for(i = 0; i < w; ++i){
+ draw_box(a, left+i, top+i, right-i, bot-i, r, g, b);
+ }
}
-
-image image_distance(image a, image b)
+
+dn_image **load_alphabet()
+{
+ int i, j;
+ const int nsize = 8;
+ dn_image **alphabets = calloc(nsize, sizeof(dn_image));
+ for(j = 0; j < nsize; ++j){
+ alphabets[j] = calloc(128, sizeof(dn_image));
+ for(i = 32; i < 127; ++i){
+ char buff[256];
+ sprintf(buff, "data/labels/%d_%d.png", i, j);
+ alphabets[j][i] = load_image_color(buff, 0, 0);
+ }
+ }
+ return alphabets;
+}
+
+void draw_detections(dn_image im, detection *dets, int num, float thresh, char **names, dn_image **alphabet, int classes)
{
- int i,j;
- image dist = make_image(a.w, a.h, 1);
- for(i = 0; i < a.c; ++i){
- for(j = 0; j < a.h*a.w; ++j){
- dist.data[j] += pow(a.data[i*a.h*a.w+j]-b.data[i*a.h*a.w+j],2);
- }
- }
- for(j = 0; j < a.h*a.w; ++j){
- dist.data[j] = sqrt(dist.data[j]);
- }
- return dist;
-}
-
-void ghost_image(image source, image dest, int dx, int dy)
-{
- int x,y,k;
- float max_dist = sqrt((-source.w/2. + .5)*(-source.w/2. + .5));
- for(k = 0; k < source.c; ++k){
- for(y = 0; y < source.h; ++y){
- for(x = 0; x < source.w; ++x){
- float dist = sqrt((x - source.w/2. + .5)*(x - source.w/2. + .5) + (y - source.h/2. + .5)*(y - source.h/2. + .5));
- float alpha = (1 - dist/max_dist);
- if(alpha < 0) alpha = 0;
- float v1 = get_pixel(source, x,y,k);
- float v2 = get_pixel(dest, dx+x,dy+y,k);
- float val = alpha*v1 + (1-alpha)*v2;
- set_pixel(dest, dx+x, dy+y, k, val);
- }
- }
- }
-}
+ int i,j;
-void blocky_image(image im, int s)
-{
- int i,j,k;
- for(k = 0; k < im.c; ++k){
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
+ for(i = 0; i < num; ++i){
+ char labelstr[4096] = {0};
+ int class = -1;
+ for(j = 0; j < classes; ++j){
+ if (dets[i].prob[j] > thresh){
+ if (class < 0) {
+ strcat(labelstr, names[j]);
+ class = j;
+ } else {
+ strcat(labelstr, ", ");
+ strcat(labelstr, names[j]);
}
- }
- }
-}
-
-void censor_image(image im, int dx, int dy, int w, int h)
-{
- int i,j,k;
- int s = 32;
- if(dx < 0) dx = 0;
- if(dy < 0) dy = 0;
-
- for(k = 0; k < im.c; ++k){
- for(j = dy; j < dy + h && j < im.h; ++j){
- for(i = dx; i < dx + w && i < im.w; ++i){
- im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
- //im.data[i + j*im.w + k*im.w*im.h] = 0;
+ printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
+ }
+ }
+ if(class >= 0){
+ int width = im.h * .006;
+
+ /*
+ if(0){
+ width = pow(prob, 1./2.)*10+1;
+ alphabet = 0;
}
- }
- }
-}
-
-void embed_image(image source, image dest, int dx, int dy)
-{
- int x,y,k;
- for(k = 0; k < source.c; ++k){
- for(y = 0; y < source.h; ++y){
- for(x = 0; x < source.w; ++x){
- float val = get_pixel(source, x,y,k);
- set_pixel(dest, dx+x, dy+y, k, val);
+ */
+
+ //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
+ int offset = class*123457 % classes;
+ float red = get_color(2,offset,classes);
+ float green = get_color(1,offset,classes);
+ float blue = get_color(0,offset,classes);
+ float rgb[3];
+
+ //width = prob*20+2;
+
+ rgb[0] = red;
+ rgb[1] = green;
+ rgb[2] = blue;
+ dn_box b = dets[i].bbox;
+ //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
+
+ int left = (b.x-b.w/2.)*im.w;
+ int right = (b.x+b.w/2.)*im.w;
+ int top = (b.y-b.h/2.)*im.h;
+ int bot = (b.y+b.h/2.)*im.h;
+
+ if(left < 0) left = 0;
+ if(right > im.w-1) right = im.w-1;
+ if(top < 0) top = 0;
+ if(bot > im.h-1) bot = im.h-1;
+
+ draw_box_width(im, left, top, right, bot, width, red, green, blue);
+ if (alphabet) {
+ dn_image label = get_label(alphabet, labelstr, (im.h*.03));
+ draw_label(im, top + width, left, label, rgb);
+ free_image(label);
+ }
+ if (dets[i].mask){
+ dn_image mask = float_to_image(14, 14, 1, dets[i].mask);
+ dn_image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
+ dn_image tmask = threshold_image(resized_mask, .5);
+ embed_image(tmask, im, left, top);
+ free_image(mask);
+ free_image(resized_mask);
+ free_image(tmask);
+ }
+ }
+ }
+}
+
+void transpose_image(dn_image im)
+{
+ assert(im.w == im.h);
+ int n, m;
+ int c;
+ for(c = 0; c < im.c; ++c){
+ for(n = 0; n < im.w-1; ++n){
+ for(m = n + 1; m < im.w; ++m){
+ float swap = im.data[m + im.w*(n + im.h*c)];
+ im.data[m + im.w*(n + im.h*c)] = im.data[n + im.w*(m + im.h*c)];
+ im.data[n + im.w*(m + im.h*c)] = swap;
+ }
+ }
+ }
+}
+
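+// rotate the square image in place by 90 degrees clockwise, repeated "times" times, cycling four pixels at a time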
+void rotate_image_cw(dn_image im, int times)
+{
+ assert(im.w == im.h);
+ times = (times + 400) % 4;
+ int i, x, y, c;
+ int n = im.w;
+ for(i = 0; i < times; ++i){
+ for(c = 0; c < im.c; ++c){
+ for(x = 0; x < n/2; ++x){
+ for(y = 0; y < (n-1)/2 + 1; ++y){
+ float temp = im.data[y + im.w*(x + im.h*c)];
+ im.data[y + im.w*(x + im.h*c)] = im.data[n-1-x + im.w*(y + im.h*c)];
+ im.data[n-1-x + im.w*(y + im.h*c)] = im.data[n-1-y + im.w*(n-1-x + im.h*c)];
+ im.data[n-1-y + im.w*(n-1-x + im.h*c)] = im.data[x + im.w*(n-1-y + im.h*c)];
+ im.data[x + im.w*(n-1-y + im.h*c)] = temp;
}
- }
- }
+ }
+ }
+ }
+}
+
+void flip_image(dn_image a)
+{
+ int i,j,k;
+ for(k = 0; k < a.c; ++k){
+ for(i = 0; i < a.h; ++i){
+ for(j = 0; j < a.w/2; ++j){
+ int index = j + a.w*(i + a.h*(k));
+ int flip = (a.w - j - 1) + a.w*(i + a.h*(k));
+ float swap = a.data[flip];
+ a.data[flip] = a.data[index];
+ a.data[index] = swap;
+ }
+ }
+ }
+}
+
+dn_image image_distance(dn_image a, dn_image b)
+{
+ int i,j;
+ dn_image dist = make_image(a.w, a.h, 1);
+ for(i = 0; i < a.c; ++i){
+ for(j = 0; j < a.h*a.w; ++j){
+ dist.data[j] += pow(a.data[i*a.h*a.w+j]-b.data[i*a.h*a.w+j],2);
+ }
+ }
+ for(j = 0; j < a.h*a.w; ++j){
+ dist.data[j] = sqrt(dist.data[j]);
+ }
+ return dist;
+}
+
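+// blend source over dest with an alpha that is strongest at the center of source and fades to zero at its edges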
+void ghost_image(dn_image source, dn_image dest, int dx, int dy)
+{
+ int x,y,k;
+ float max_dist = sqrt((-source.w/2. + .5)*(-source.w/2. + .5));
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float dist = sqrt((x - source.w/2. + .5)*(x - source.w/2. + .5) + (y - source.h/2. + .5)*(y - source.h/2. + .5));
+ float alpha = (1 - dist/max_dist);
+ if(alpha < 0) alpha = 0;
+ float v1 = get_pixel(source, x,y,k);
+ float v2 = get_pixel(dest, dx+x,dy+y,k);
+ float val = alpha*v1 + (1-alpha)*v2;
+ set_pixel(dest, dx+x, dy+y, k, val);
+ }
+ }
+ }
+}
+
+void blocky_image(dn_image im, int s)
+{
+ int i,j,k;
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
+ }
+ }
+ }
+}
+
+void censor_image(dn_image im, int dx, int dy, int w, int h)
+{
+ int i,j,k;
+ int s = 32;
+ if(dx < 0) dx = 0;
+ if(dy < 0) dy = 0;
+
+ for(k = 0; k < im.c; ++k){
+ for(j = dy; j < dy + h && j < im.h; ++j){
+ for(i = dx; i < dx + w && i < im.w; ++i){
+ im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
+ //im.data[i + j*im.w + k*im.w*im.h] = 0;
+ }
+ }
+ }
+}
+
+void embed_image(dn_image source, dn_image dest, int dx, int dy)
+{
+ int x,y,k;
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float val = get_pixel(source, x,y,k);
+ set_pixel(dest, dx+x, dy+y, k, val);
+ }
+ }
+ }
+}
+
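+// stack the channels of source vertically, separated by "border" pixels, into a single-channel image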
+dn_image collapse_image_layers(dn_image source, int border)
+{
+ int h = source.h;
+ h = (h+border)*source.c - border;
+ dn_image dest = make_image(source.w, h, 1);
+ int i;
+ for(i = 0; i < source.c; ++i){
+ dn_image layer = get_image_layer(source, i);
+ int h_offset = i*(source.h+border);
+ embed_image(layer, dest, 0, h_offset);
+ free_image(layer);
+ }
+ return dest;
+}
+
+void constrain_image(dn_image im)
+{
+ int i;
+ for(i = 0; i < im.w*im.h*im.c; ++i){
+ if(im.data[i] < 0) im.data[i] = 0;
+ if(im.data[i] > 1) im.data[i] = 1;
+ }
+}
+
+void normalize_image(dn_image p)
+{
+ int i;
+ float min = 9999999;
+ float max = -999999;
+
+ for(i = 0; i < p.h*p.w*p.c; ++i){
+ float v = p.data[i];
+ if(v < min) min = v;
+ if(v > max) max = v;
+ }
+ if(max - min < .000000001){
+ min = 0;
+ max = 1;
+ }
+ for(i = 0; i < p.c*p.w*p.h; ++i){
+ p.data[i] = (p.data[i] - min)/(max-min);
+ }
+}
+
+void normalize_image2(dn_image p)
+{
+ float *min = calloc(p.c, sizeof(float));
+ float *max = calloc(p.c, sizeof(float));
+ int i,j;
+ for(i = 0; i < p.c; ++i) min[i] = max[i] = p.data[i*p.h*p.w];
+
+ for(j = 0; j < p.c; ++j){
+ for(i = 0; i < p.h*p.w; ++i){
+ float v = p.data[i+j*p.h*p.w];
+ if(v < min[j]) min[j] = v;
+ if(v > max[j]) max[j] = v;
+ }
+ }
+ for(i = 0; i < p.c; ++i){
+ if(max[i] - min[i] < .000000001){
+ min[i] = 0;
+ max[i] = 1;
+ }
+ }
+ for(j = 0; j < p.c; ++j){
+ for(i = 0; i < p.w*p.h; ++i){
+ p.data[i+j*p.h*p.w] = (p.data[i+j*p.h*p.w] - min[j])/(max[j]-min[j]);
+ }
+ }
+ free(min);
+ free(max);
}
-
-image collapse_image_layers(image source, int border)
-{
- int h = source.h;
- h = (h+border)*source.c - border;
- image dest = make_image(source.w, h, 1);
- int i;
- for(i = 0; i < source.c; ++i){
- image layer = get_image_layer(source, i);
- int h_offset = i*(source.h+border);
- embed_image(layer, dest, 0, h_offset);
- free_image(layer);
- }
- return dest;
+
+void copy_image_into(dn_image src, dn_image dest)
+{
+ memcpy(dest.data, src.data, src.h*src.w*src.c*sizeof(float));
}
-
-void constrain_image(image im)
+
+dn_image copy_image(dn_image p)
{
- int i;
- for(i = 0; i < im.w*im.h*im.c; ++i){
- if(im.data[i] < 0) im.data[i] = 0;
- if(im.data[i] > 1) im.data[i] = 1;
- }
+ dn_image copy = p;
+ copy.data = calloc(p.h*p.w*p.c, sizeof(float));
+ memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
+ return copy;
}
-void normalize_image(image p)
+void rgbgr_image(dn_image im)
{
- int i;
- float min = 9999999;
- float max = -999999;
-
- for(i = 0; i < p.h*p.w*p.c; ++i){
- float v = p.data[i];
- if(v < min) min = v;
- if(v > max) max = v;
- }
- if(max - min < .000000001){
- min = 0;
- max = 1;
- }
- for(i = 0; i < p.c*p.w*p.h; ++i){
- p.data[i] = (p.data[i] - min)/(max-min);
- }
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ float swap = im.data[i];
+ im.data[i] = im.data[i+im.w*im.h*2];
+ im.data[i+im.w*im.h*2] = swap;
+ }
}
-void normalize_image2(image p)
-{
- float *min = calloc(p.c, sizeof(float));
- float *max = calloc(p.c, sizeof(float));
- int i,j;
- for(i = 0; i < p.c; ++i) min[i] = max[i] = p.data[i*p.h*p.w];
-
- for(j = 0; j < p.c; ++j){
- for(i = 0; i < p.h*p.w; ++i){
- float v = p.data[i+j*p.h*p.w];
- if(v < min[j]) min[j] = v;
- if(v > max[j]) max[j] = v;
- }
- }
- for(i = 0; i < p.c; ++i){
- if(max[i] - min[i] < .000000001){
- min[i] = 0;
- max[i] = 1;
- }
- }
- for(j = 0; j < p.c; ++j){
- for(i = 0; i < p.w*p.h; ++i){
- p.data[i+j*p.h*p.w] = (p.data[i+j*p.h*p.w] - min[j])/(max[j]-min[j]);
- }
- }
- free(min);
- free(max);
-}
-
-void copy_image_into(image src, image dest)
-{
- memcpy(dest.data, src.data, src.h*src.w*src.c*sizeof(float));
-}
-
-image copy_image(image p)
-{
- image copy = p;
- copy.data = calloc(p.h*p.w*p.c, sizeof(float));
- memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
- return copy;
-}
-
-void rgbgr_image(image im)
-{
- int i;
- for(i = 0; i < im.w*im.h; ++i){
- float swap = im.data[i];
- im.data[i] = im.data[i+im.w*im.h*2];
- im.data[i+im.w*im.h*2] = swap;
- }
-}
-
-int show_image(image p, const char *name, int ms)
+int show_image(dn_image p, const char *name, int ms)
{
#ifdef OPENCV
- int c = show_image_cv(p, name, ms);
+ int c = show_image_cv(p, name, ms);
return c;
#else
- fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name);
- save_image(p, name);
- return -1;
+ fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name);
+ save_image(p, name);
+ return -1;
#endif
}
-void save_image_options(image im, const char *name, IMTYPE f, int quality)
-{
- char buff[256];
- //sprintf(buff, "%s (%d)", name, windows);
- if(f == PNG) sprintf(buff, "%s.png", name);
- else if (f == BMP) sprintf(buff, "%s.bmp", name);
- else if (f == TGA) sprintf(buff, "%s.tga", name);
- else if (f == JPG) sprintf(buff, "%s.jpg", name);
- else sprintf(buff, "%s.png", name);
- unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char));
- int i,k;
- for(k = 0; k < im.c; ++k){
- for(i = 0; i < im.w*im.h; ++i){
- data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
- }
- }
- int success = 0;
- if(f == PNG) success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
- else if (f == BMP) success = stbi_write_bmp(buff, im.w, im.h, im.c, data);
- else if (f == TGA) success = stbi_write_tga(buff, im.w, im.h, im.c, data);
- else if (f == JPG) success = stbi_write_jpg(buff, im.w, im.h, im.c, data, quality);
- free(data);
- if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
-}
-
-void save_image(image im, const char *name)
-{
- save_image_options(im, name, JPG, 80);
-}
-
-void show_image_layers(image p, char *name)
-{
- int i;
- char buff[256];
- for(i = 0; i < p.c; ++i){
- sprintf(buff, "%s - Layer %d", name, i);
- image layer = get_image_layer(p, i);
- show_image(layer, buff, 1);
- free_image(layer);
- }
-}
-
-void show_image_collapsed(image p, char *name)
-{
- image c = collapse_image_layers(p, 1);
- show_image(c, name, 1);
- free_image(c);
-}
-
-image make_empty_image(int w, int h, int c)
+void save_image_options(dn_image im, const char *name, IMTYPE f, int quality)
{
- image out;
- out.data = 0;
- out.h = h;
- out.w = w;
- out.c = c;
- return out;
+ char buff[256];
+ //sprintf(buff, "%s (%d)", name, windows);
+ if(f == PNG) sprintf(buff, "%s.png", name);
+ else if (f == BMP) sprintf(buff, "%s.bmp", name);
+ else if (f == TGA) sprintf(buff, "%s.tga", name);
+ else if (f == JPG) sprintf(buff, "%s.jpg", name);
+ else sprintf(buff, "%s.png", name);
+ unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char));
+ int i,k;
+ for(k = 0; k < im.c; ++k){
+ for(i = 0; i < im.w*im.h; ++i){
+ data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
+ }
+ }
+ int success = 0;
+ if(f == PNG) success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
+ else if (f == BMP) success = stbi_write_bmp(buff, im.w, im.h, im.c, data);
+ else if (f == TGA) success = stbi_write_tga(buff, im.w, im.h, im.c, data);
+ else if (f == JPG) success = stbi_write_jpg(buff, im.w, im.h, im.c, data, quality);
+ free(data);
+ if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
-image make_image(int w, int h, int c)
+void save_image(dn_image im, const char *name)
{
- image out = make_empty_image(w,h,c);
- out.data = calloc(h*w*c, sizeof(float));
- return out;
+ save_image_options(im, name, JPG, 80);
}
-image make_random_image(int w, int h, int c)
+void show_image_layers(dn_image p, char *name)
{
- image out = make_empty_image(w,h,c);
- out.data = calloc(h*w*c, sizeof(float));
- int i;
- for(i = 0; i < w*h*c; ++i){
- out.data[i] = (rand_normal() * .25) + .5;
- }
- return out;
+ int i;
+ char buff[256];
+ for(i = 0; i < p.c; ++i){
+ sprintf(buff, "%s - Layer %d", name, i);
+ dn_image layer = get_image_layer(p, i);
+ show_image(layer, buff, 1);
+ free_image(layer);
+ }
}
-image float_to_image(int w, int h, int c, float *data)
+void show_image_collapsed(dn_image p, char *name)
{
- image out = make_empty_image(w,h,c);
- out.data = data;
- return out;
+ dn_image c = collapse_image_layers(p, 1);
+ show_image(c, name, 1);
+ free_image(c);
}
-void place_image(image im, int w, int h, int dx, int dy, image canvas)
+dn_image make_empty_image(int w, int h, int c)
{
- int x, y, c;
- for(c = 0; c < im.c; ++c){
- for(y = 0; y < h; ++y){
- for(x = 0; x < w; ++x){
- float rx = ((float)x / w) * im.w;
- float ry = ((float)y / h) * im.h;
- float val = bilinear_interpolate(im, rx, ry, c);
- set_pixel(canvas, x + dx, y + dy, c, val);
- }
- }
- }
+ dn_image out;
+ out.data = 0;
+ out.h = h;
+ out.w = w;
+ out.c = c;
+ return out;
}
-image center_crop_image(image im, int w, int h)
+dn_image make_image(int w, int h, int c)
{
- int m = (im.w < im.h) ? im.w : im.h;
- image c = crop_image(im, (im.w - m) / 2, (im.h - m)/2, m, m);
- image r = resize_image(c, w, h);
- free_image(c);
- return r;
+ dn_image out = make_empty_image(w,h,c);
+ out.data = calloc(h*w*c, sizeof(float));
+ return out;
}
-image rotate_crop_image(image im, float rad, float s, int w, int h, float dx, float dy, float aspect)
+dn_image make_random_image(int w, int h, int c)
{
- int x, y, c;
- float cx = im.w/2.;
- float cy = im.h/2.;
- image rot = make_image(w, h, im.c);
- for(c = 0; c < im.c; ++c){
- for(y = 0; y < h; ++y){
- for(x = 0; x < w; ++x){
- float rx = cos(rad)*((x - w/2.)/s*aspect + dx/s*aspect) - sin(rad)*((y - h/2.)/s + dy/s) + cx;
- float ry = sin(rad)*((x - w/2.)/s*aspect + dx/s*aspect) + cos(rad)*((y - h/2.)/s + dy/s) + cy;
- float val = bilinear_interpolate(im, rx, ry, c);
- set_pixel(rot, x, y, c, val);
- }
- }
- }
- return rot;
-}
-
-image rotate_image(image im, float rad)
-{
- int x, y, c;
- float cx = im.w/2.;
- float cy = im.h/2.;
- image rot = make_image(im.w, im.h, im.c);
- for(c = 0; c < im.c; ++c){
- for(y = 0; y < im.h; ++y){
- for(x = 0; x < im.w; ++x){
- float rx = cos(rad)*(x-cx) - sin(rad)*(y-cy) + cx;
- float ry = sin(rad)*(x-cx) + cos(rad)*(y-cy) + cy;
- float val = bilinear_interpolate(im, rx, ry, c);
- set_pixel(rot, x, y, c, val);
- }
- }
- }
- return rot;
+ dn_image out = make_empty_image(w,h,c);
+ out.data = calloc(h*w*c, sizeof(float));
+ int i;
+ for(i = 0; i < w*h*c; ++i){
+ out.data[i] = (rand_normal() * .25) + .5;
+ }
+ return out;
}
-void fill_image(image m, float s)
+dn_image float_to_image(int w, int h, int c, float *data)
{
- int i;
- for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
+ dn_image out = make_empty_image(w,h,c);
+ out.data = data;
+ return out;
}
-void translate_image(image m, float s)
+void place_image(dn_image im, int w, int h, int dx, int dy, dn_image canvas)
{
- int i;
- for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] += s;
+ int x, y, c;
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < h; ++y){
+ for(x = 0; x < w; ++x){
+ float rx = ((float)x / w) * im.w;
+ float ry = ((float)y / h) * im.h;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(canvas, x + dx, y + dy, c, val);
+ }
+ }
+ }
}
-void scale_image(image m, float s)
+dn_image center_crop_image(dn_image im, int w, int h)
{
- int i;
- for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] *= s;
+ int m = (im.w < im.h) ? im.w : im.h;
+ dn_image c = crop_image(im, (im.w - m) / 2, (im.h - m)/2, m, m);
+ dn_image r = resize_image(c, w, h);
+ free_image(c);
+ return r;
}
-image crop_image(image im, int dx, int dy, int w, int h)
+dn_image rotate_crop_image(dn_image im, float rad, float s, int w, int h, float dx, float dy, float aspect)
{
- image cropped = make_image(w, h, im.c);
- int i, j, k;
- for(k = 0; k < im.c; ++k){
- for(j = 0; j < h; ++j){
- for(i = 0; i < w; ++i){
- int r = j + dy;
- int c = i + dx;
- float val = 0;
- r = constrain_int(r, 0, im.h-1);
- c = constrain_int(c, 0, im.w-1);
- val = get_pixel(im, c, r, k);
- set_pixel(cropped, i, j, k, val);
- }
- }
- }
- return cropped;
-}
-
-int best_3d_shift_r(image a, image b, int min, int max)
-{
- if(min == max) return min;
- int mid = floor((min + max) / 2.);
- image c1 = crop_image(b, 0, mid, b.w, b.h);
- image c2 = crop_image(b, 0, mid+1, b.w, b.h);
- float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 10);
- float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 10);
- free_image(c1);
- free_image(c2);
- if(d1 < d2) return best_3d_shift_r(a, b, min, mid);
- else return best_3d_shift_r(a, b, mid+1, max);
-}
-
-int best_3d_shift(image a, image b, int min, int max)
-{
- int i;
- int best = 0;
- float best_distance = FLT_MAX;
- for(i = min; i <= max; i += 2){
- image c = crop_image(b, 0, i, b.w, b.h);
- float d = dist_array(c.data, a.data, a.w*a.h*a.c, 100);
- if(d < best_distance){
- best_distance = d;
- best = i;
- }
- printf("%d %f\n", i, d);
- free_image(c);
- }
- return best;
+ int x, y, c;
+ float cx = im.w/2.;
+ float cy = im.h/2.;
+ dn_image rot = make_image(w, h, im.c);
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < h; ++y){
+ for(x = 0; x < w; ++x){
+ float rx = cos(rad)*((x - w/2.)/s*aspect + dx/s*aspect) - sin(rad)*((y - h/2.)/s + dy/s) + cx;
+ float ry = sin(rad)*((x - w/2.)/s*aspect + dx/s*aspect) + cos(rad)*((y - h/2.)/s + dy/s) + cy;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(rot, x, y, c, val);
+ }
+ }
+ }
+ return rot;
}
-void composite_3d(char *f1, char *f2, char *out, int delta)
+dn_image rotate_image(dn_image im, float rad)
{
- if(!out) out = "out";
- image a = load_image(f1, 0,0,0);
- image b = load_image(f2, 0,0,0);
- int shift = best_3d_shift_r(a, b, -a.h/100, a.h/100);
-
- image c1 = crop_image(b, 10, shift, b.w, b.h);
- float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 100);
- image c2 = crop_image(b, -10, shift, b.w, b.h);
- float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 100);
-
- if(d2 < d1 && 0){
- image swap = a;
- a = b;
- b = swap;
- shift = -shift;
- printf("swapped, %d\n", shift);
- }
- else{
- printf("%d\n", shift);
- }
-
- image c = crop_image(b, delta, shift, a.w, a.h);
- int i;
- for(i = 0; i < c.w*c.h; ++i){
- c.data[i] = a.data[i];
- }
- save_image(c, out);
+ int x, y, c;
+ float cx = im.w/2.;
+ float cy = im.h/2.;
+ dn_image rot = make_image(im.w, im.h, im.c);
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < im.h; ++y){
+ for(x = 0; x < im.w; ++x){
+ float rx = cos(rad)*(x-cx) - sin(rad)*(y-cy) + cx;
+ float ry = sin(rad)*(x-cx) + cos(rad)*(y-cy) + cy;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(rot, x, y, c, val);
+ }
+ }
+ }
+ return rot;
}
-void letterbox_image_into(image im, int w, int h, image boxed)
+void fill_image(dn_image m, float s)
{
- int new_w = im.w;
- int new_h = im.h;
- if (((float)w/im.w) < ((float)h/im.h)) {
- new_w = w;
- new_h = (im.h * w)/im.w;
- } else {
- new_h = h;
- new_w = (im.w * h)/im.h;
- }
- image resized = resize_image(im, new_w, new_h);
- embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
- free_image(resized);
-}
-
-image letterbox_image(image im, int w, int h)
-{
- int new_w = im.w;
- int new_h = im.h;
- if (((float)w/im.w) < ((float)h/im.h)) {
- new_w = w;
- new_h = (im.h * w)/im.w;
- } else {
- new_h = h;
- new_w = (im.w * h)/im.h;
- }
- image resized = resize_image(im, new_w, new_h);
- image boxed = make_image(w, h, im.c);
- fill_image(boxed, .5);
- //int i;
- //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
- embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
- free_image(resized);
- return boxed;
-}
-
-image resize_max(image im, int max)
-{
- int w = im.w;
- int h = im.h;
- if(w > h){
- h = (h * max) / w;
- w = max;
- } else {
- w = (w * max) / h;
- h = max;
- }
- if(w == im.w && h == im.h) return im;
- image resized = resize_image(im, w, h);
- return resized;
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
}
-image resize_min(image im, int min)
+void translate_image(dn_image m, float s)
{
- int w = im.w;
- int h = im.h;
- if(w < h){
- h = (h * min) / w;
- w = min;
- } else {
- w = (w * min) / h;
- h = min;
- }
- if(w == im.w && h == im.h) return im;
- image resized = resize_image(im, w, h);
- return resized;
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] += s;
}
-image random_crop_image(image im, int w, int h)
+void scale_image(dn_image m, float s)
{
- int dx = rand_int(0, im.w - w);
- int dy = rand_int(0, im.h - h);
- image crop = crop_image(im, dx, dy, w, h);
- return crop;
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] *= s;
}
-augment_args random_augment_args(image im, float angle, float aspect, int low, int high, int w, int h)
+dn_image crop_image(dn_image im, int dx, int dy, int w, int h)
{
- augment_args a = {0};
- aspect = rand_scale(aspect);
- int r = rand_int(low, high);
- int min = (im.h < im.w*aspect) ? im.h : im.w*aspect;
- float scale = (float)r / min;
-
- float rad = rand_uniform(-angle, angle) * TWO_PI / 360.;
-
- float dx = (im.w*scale/aspect - w) / 2.;
- float dy = (im.h*scale - w) / 2.;
- //if(dx < 0) dx = 0;
- //if(dy < 0) dy = 0;
- dx = rand_uniform(-dx, dx);
- dy = rand_uniform(-dy, dy);
-
- a.rad = rad;
- a.scale = scale;
- a.w = w;
- a.h = h;
- a.dx = dx;
- a.dy = dy;
- a.aspect = aspect;
- return a;
+ dn_image cropped = make_image(w, h, im.c);
+ int i, j, k;
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < h; ++j){
+ for(i = 0; i < w; ++i){
+ int r = j + dy;
+ int c = i + dx;
+ float val = 0;
+ r = constrain_int(r, 0, im.h-1);
+ c = constrain_int(c, 0, im.w-1);
+ val = get_pixel(im, c, r, k);
+ set_pixel(cropped, i, j, k, val);
+ }
+ }
+ }
+ return cropped;
+}
+
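+// binary-search the vertical shift of b that minimizes its pixel distance to a (recursive halving of [min, max])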
+int best_3d_shift_r(dn_image a, dn_image b, int min, int max)
+{
+ if(min == max) return min;
+ int mid = floor((min + max) / 2.);
+ dn_image c1 = crop_image(b, 0, mid, b.w, b.h);
+ dn_image c2 = crop_image(b, 0, mid+1, b.w, b.h);
+ float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 10);
+ float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 10);
+ free_image(c1);
+ free_image(c2);
+ if(d1 < d2) return best_3d_shift_r(a, b, min, mid);
+ else return best_3d_shift_r(a, b, mid+1, max);
+}
+
+int best_3d_shift(dn_image a, dn_image b, int min, int max)
+{
+ int i;
+ int best = 0;
+ float best_distance = FLT_MAX;
+ for(i = min; i <= max; i += 2){
+ dn_image c = crop_image(b, 0, i, b.w, b.h);
+ float d = dist_array(c.data, a.data, a.w*a.h*a.c, 100);
+ if(d < best_distance){
+ best_distance = d;
+ best = i;
+ }
+ printf("%d %f\n", i, d);
+ free_image(c);
+ }
+ return best;
}
-image random_augment_image(image im, float angle, float aspect, int low, int high, int w, int h)
+void composite_3d(char *f1, char *f2, char *out, int delta)
{
- augment_args a = random_augment_args(im, angle, aspect, low, high, w, h);
- image crop = rotate_crop_image(im, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
- return crop;
+ if(!out) out = "out";
+ dn_image a = load_image(f1, 0,0,0);
+ dn_image b = load_image(f2, 0,0,0);
+ int shift = best_3d_shift_r(a, b, -a.h/100, a.h/100);
+
+ dn_image c1 = crop_image(b, 10, shift, b.w, b.h);
+ float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 100);
+ dn_image c2 = crop_image(b, -10, shift, b.w, b.h);
+ float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 100);
+
+ if(d2 < d1 && 0){
+ dn_image swap = a;
+ a = b;
+ b = swap;
+ shift = -shift;
+ printf("swapped, %d\n", shift);
+ }
+ else{
+ printf("%d\n", shift);
+ }
+
+ dn_image c = crop_image(b, delta, shift, a.w, a.h);
+ int i;
+ for(i = 0; i < c.w*c.h; ++i){
+ c.data[i] = a.data[i];
+ }
+ save_image(c, out);
+}
+
+void letterbox_image_into(dn_image im, int w, int h, dn_image boxed)
+{
+ int new_w = im.w;
+ int new_h = im.h;
+ if (((float)w/im.w) < ((float)h/im.h)) {
+ new_w = w;
+ new_h = (im.h * w)/im.w;
+ } else {
+ new_h = h;
+ new_w = (im.w * h)/im.h;
+ }
+ dn_image resized = resize_image(im, new_w, new_h);
+ embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
+ free_image(resized);
+}
+
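+// resize im to fit inside w x h while keeping its aspect ratio, then center it on a 0.5-gray canvas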
+dn_image letterbox_image(dn_image im, int w, int h)
+{
+ int new_w = im.w;
+ int new_h = im.h;
+ if (((float)w/im.w) < ((float)h/im.h)) {
+ new_w = w;
+ new_h = (im.h * w)/im.w;
+ } else {
+ new_h = h;
+ new_w = (im.w * h)/im.h;
+ }
+ dn_image resized = resize_image(im, new_w, new_h);
+ dn_image boxed = make_image(w, h, im.c);
+ fill_image(boxed, .5);
+ //int i;
+ //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
+ embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
+ free_image(resized);
+ return boxed;
+}
+
+dn_image resize_max(dn_image im, int max)
+{
+ int w = im.w;
+ int h = im.h;
+ if(w > h){
+ h = (h * max) / w;
+ w = max;
+ } else {
+ w = (w * max) / h;
+ h = max;
+ }
+ if(w == im.w && h == im.h) return im;
+ dn_image resized = resize_image(im, w, h);
+ return resized;
+}
+
+dn_image resize_min(dn_image im, int min)
+{
+ int w = im.w;
+ int h = im.h;
+ if(w < h){
+ h = (h * min) / w;
+ w = min;
+ } else {
+ w = (w * min) / h;
+ h = min;
+ }
+ if(w == im.w && h == im.h) return im;
+ dn_image resized = resize_image(im, w, h);
+ return resized;
+}
+
+dn_image random_crop_image(dn_image im, int w, int h)
+{
+ int dx = rand_int(0, im.w - w);
+ int dy = rand_int(0, im.h - h);
+ dn_image crop = crop_image(im, dx, dy, w, h);
+ return crop;
+}
+
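+// sample a random rotation, scale, aspect jitter and crop offset to use for augmentation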
+augment_args random_augment_args(dn_image im, float angle, float aspect, int low, int high, int w, int h)
+{
+ augment_args a = {0};
+ aspect = rand_scale(aspect);
+ int r = rand_int(low, high);
+ int min = (im.h < im.w*aspect) ? im.h : im.w*aspect;
+ float scale = (float)r / min;
+
+ float rad = rand_uniform(-angle, angle) * TWO_PI / 360.;
+
+ float dx = (im.w*scale/aspect - w) / 2.;
+ float dy = (im.h*scale - w) / 2.;
+ //if(dx < 0) dx = 0;
+ //if(dy < 0) dy = 0;
+ dx = rand_uniform(-dx, dx);
+ dy = rand_uniform(-dy, dy);
+
+ a.rad = rad;
+ a.scale = scale;
+ a.w = w;
+ a.h = h;
+ a.dx = dx;
+ a.dy = dy;
+ a.aspect = aspect;
+ return a;
+}
+
+dn_image random_augment_image(dn_image im, float angle, float aspect, int low, int high, int w, int h)
+{
+ augment_args a = random_augment_args(im, angle, aspect, low, high, w, h);
+ dn_image crop = rotate_crop_image(im, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+ return crop;
}
float three_way_max(float a, float b, float c)
{
- return (a > b) ? ( (a > c) ? a : c) : ( (b > c) ? b : c) ;
+ return (a > b) ? ( (a > c) ? a : c) : ( (b > c) ? b : c) ;
}
float three_way_min(float a, float b, float c)
{
- return (a < b) ? ( (a < c) ? a : c) : ( (b < c) ? b : c) ;
+ return (a < b) ? ( (a < c) ? a : c) : ( (b < c) ? b : c) ;
}
-void yuv_to_rgb(image im)
+void yuv_to_rgb(dn_image im)
{
- assert(im.c == 3);
- int i, j;
- float r, g, b;
- float y, u, v;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- y = get_pixel(im, i , j, 0);
- u = get_pixel(im, i , j, 1);
- v = get_pixel(im, i , j, 2);
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float y, u, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ y = get_pixel(im, i , j, 0);
+ u = get_pixel(im, i , j, 1);
+ v = get_pixel(im, i , j, 2);
- r = y + 1.13983*v;
- g = y + -.39465*u + -.58060*v;
- b = y + 2.03211*u;
+ r = y + 1.13983*v;
+ g = y + -.39465*u + -.58060*v;
+ b = y + 2.03211*u;
- set_pixel(im, i, j, 0, r);
- set_pixel(im, i, j, 1, g);
- set_pixel(im, i, j, 2, b);
- }
- }
+ set_pixel(im, i, j, 0, r);
+ set_pixel(im, i, j, 1, g);
+ set_pixel(im, i, j, 2, b);
+ }
+ }
}
-void rgb_to_yuv(image im)
+void rgb_to_yuv(dn_image im)
{
- assert(im.c == 3);
- int i, j;
- float r, g, b;
- float y, u, v;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- r = get_pixel(im, i , j, 0);
- g = get_pixel(im, i , j, 1);
- b = get_pixel(im, i , j, 2);
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float y, u, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ r = get_pixel(im, i , j, 0);
+ g = get_pixel(im, i , j, 1);
+ b = get_pixel(im, i , j, 2);
- y = .299*r + .587*g + .114*b;
- u = -.14713*r + -.28886*g + .436*b;
- v = .615*r + -.51499*g + -.10001*b;
+ y = .299*r + .587*g + .114*b;
+ u = -.14713*r + -.28886*g + .436*b;
+ v = .615*r + -.51499*g + -.10001*b;
- set_pixel(im, i, j, 0, y);
- set_pixel(im, i, j, 1, u);
- set_pixel(im, i, j, 2, v);
- }
- }
+ set_pixel(im, i, j, 0, y);
+ set_pixel(im, i, j, 1, u);
+ set_pixel(im, i, j, 2, v);
+ }
+ }
}
// http://www.cs.rit.edu/~ncs/color/t_convert.html
-void rgb_to_hsv(image im)
-{
- assert(im.c == 3);
- int i, j;
- float r, g, b;
- float h, s, v;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- r = get_pixel(im, i , j, 0);
- g = get_pixel(im, i , j, 1);
- b = get_pixel(im, i , j, 2);
- float max = three_way_max(r,g,b);
- float min = three_way_min(r,g,b);
- float delta = max - min;
- v = max;
- if(max == 0){
- s = 0;
- h = 0;
- }else{
- s = delta/max;
- if(r == max){
- h = (g - b) / delta;
- } else if (g == max) {
- h = 2 + (b - r) / delta;
- } else {
- h = 4 + (r - g) / delta;
- }
- if (h < 0) h += 6;
- h = h/6.;
- }
- set_pixel(im, i, j, 0, h);
- set_pixel(im, i, j, 1, s);
- set_pixel(im, i, j, 2, v);
- }
- }
-}
-
-void hsv_to_rgb(image im)
-{
- assert(im.c == 3);
- int i, j;
- float r, g, b;
- float h, s, v;
- float f, p, q, t;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- h = 6 * get_pixel(im, i , j, 0);
- s = get_pixel(im, i , j, 1);
- v = get_pixel(im, i , j, 2);
- if (s == 0) {
- r = g = b = v;
+void rgb_to_hsv(dn_image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float h, s, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ r = get_pixel(im, i , j, 0);
+ g = get_pixel(im, i , j, 1);
+ b = get_pixel(im, i , j, 2);
+ float max = three_way_max(r,g,b);
+ float min = three_way_min(r,g,b);
+ float delta = max - min;
+ v = max;
+ if(max == 0){
+ s = 0;
+ h = 0;
+ }else{
+ s = delta/max;
+ if(r == max){
+ h = (g - b) / delta;
+ } else if (g == max) {
+ h = 2 + (b - r) / delta;
} else {
- int index = floor(h);
- f = h - index;
- p = v*(1-s);
- q = v*(1-s*f);
- t = v*(1-s*(1-f));
- if(index == 0){
- r = v; g = t; b = p;
- } else if(index == 1){
- r = q; g = v; b = p;
- } else if(index == 2){
- r = p; g = v; b = t;
- } else if(index == 3){
- r = p; g = q; b = v;
- } else if(index == 4){
- r = t; g = p; b = v;
- } else {
- r = v; g = p; b = q;
- }
- }
- set_pixel(im, i, j, 0, r);
- set_pixel(im, i, j, 1, g);
- set_pixel(im, i, j, 2, b);
- }
- }
-}
-
-void grayscale_image_3c(image im)
-{
- assert(im.c == 3);
- int i, j, k;
- float scale[] = {0.299, 0.587, 0.114};
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- float val = 0;
- for(k = 0; k < 3; ++k){
- val += scale[k]*get_pixel(im, i, j, k);
- }
- im.data[0*im.h*im.w + im.w*j + i] = val;
- im.data[1*im.h*im.w + im.w*j + i] = val;
- im.data[2*im.h*im.w + im.w*j + i] = val;
- }
- }
-}
-
-image grayscale_image(image im)
-{
- assert(im.c == 3);
- int i, j, k;
- image gray = make_image(im.w, im.h, 1);
- float scale[] = {0.299, 0.587, 0.114};
- for(k = 0; k < im.c; ++k){
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- gray.data[i+im.w*j] += scale[k]*get_pixel(im, i, j, k);
+ h = 4 + (r - g) / delta;
}
- }
- }
- return gray;
-}
-
-image threshold_image(image im, float thresh)
-{
- int i;
- image t = make_image(im.w, im.h, im.c);
- for(i = 0; i < im.w*im.h*im.c; ++i){
- t.data[i] = im.data[i]>thresh ? 1 : 0;
- }
- return t;
-}
-
-image blend_image(image fore, image back, float alpha)
-{
- assert(fore.w == back.w && fore.h == back.h && fore.c == back.c);
- image blend = make_image(fore.w, fore.h, fore.c);
- int i, j, k;
- for(k = 0; k < fore.c; ++k){
- for(j = 0; j < fore.h; ++j){
- for(i = 0; i < fore.w; ++i){
- float val = alpha * get_pixel(fore, i, j, k) +
- (1 - alpha)* get_pixel(back, i, j, k);
- set_pixel(blend, i, j, k, val);
+ if (h < 0) h += 6;
+ h = h/6.;
+ }
+ set_pixel(im, i, j, 0, h);
+ set_pixel(im, i, j, 1, s);
+ set_pixel(im, i, j, 2, v);
+ }
+ }
+}
+
+void hsv_to_rgb(dn_image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float h, s, v;
+ float f, p, q, t;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ h = 6 * get_pixel(im, i , j, 0);
+ s = get_pixel(im, i , j, 1);
+ v = get_pixel(im, i , j, 2);
+ if (s == 0) {
+ r = g = b = v;
+ } else {
+ int index = floor(h);
+ f = h - index;
+ p = v*(1-s);
+ q = v*(1-s*f);
+ t = v*(1-s*(1-f));
+ if(index == 0){
+ r = v; g = t; b = p;
+ } else if(index == 1){
+ r = q; g = v; b = p;
+ } else if(index == 2){
+ r = p; g = v; b = t;
+ } else if(index == 3){
+ r = p; g = q; b = v;
+ } else if(index == 4){
+ r = t; g = p; b = v;
+ } else {
+ r = v; g = p; b = q;
}
- }
- }
- return blend;
+ }
+ set_pixel(im, i, j, 0, r);
+ set_pixel(im, i, j, 1, g);
+ set_pixel(im, i, j, 2, b);
+ }
+ }
+}
+
+void grayscale_image_3c(dn_image im)
+{
+ assert(im.c == 3);
+ int i, j, k;
+ float scale[] = {0.299, 0.587, 0.114};
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float val = 0;
+ for(k = 0; k < 3; ++k){
+ val += scale[k]*get_pixel(im, i, j, k);
+ }
+ im.data[0*im.h*im.w + im.w*j + i] = val;
+ im.data[1*im.h*im.w + im.w*j + i] = val;
+ im.data[2*im.h*im.w + im.w*j + i] = val;
+ }
+ }
+}
+
+dn_image grayscale_image(dn_image im)
+{
+ assert(im.c == 3);
+ int i, j, k;
+ dn_image gray = make_image(im.w, im.h, 1);
+ float scale[] = {0.299, 0.587, 0.114};
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ gray.data[i+im.w*j] += scale[k]*get_pixel(im, i, j, k);
+ }
+ }
+ }
+ return gray;
+}
+
+dn_image threshold_image(dn_image im, float thresh)
+{
+ int i;
+ dn_image t = make_image(im.w, im.h, im.c);
+ for(i = 0; i < im.w*im.h*im.c; ++i){
+ t.data[i] = im.data[i]>thresh ? 1 : 0;
+ }
+ return t;
+}
+
+dn_image blend_image(dn_image fore, dn_image back, float alpha)
+{
+ assert(fore.w == back.w && fore.h == back.h && fore.c == back.c);
+ dn_image blend = make_image(fore.w, fore.h, fore.c);
+ int i, j, k;
+ for(k = 0; k < fore.c; ++k){
+ for(j = 0; j < fore.h; ++j){
+ for(i = 0; i < fore.w; ++i){
+ float val = alpha * get_pixel(fore, i, j, k) +
+ (1 - alpha)* get_pixel(back, i, j, k);
+ set_pixel(blend, i, j, k, val);
+ }
+ }
+ }
+ return blend;
}
-void scale_image_channel(image im, int c, float v)
+void scale_image_channel(dn_image im, int c, float v)
{
- int i, j;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- float pix = get_pixel(im, i, j, c);
- pix = pix*v;
- set_pixel(im, i, j, c, pix);
- }
- }
-}
-
-void translate_image_channel(image im, int c, float v)
+ int i, j;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float pix = get_pixel(im, i, j, c);
+ pix = pix*v;
+ set_pixel(im, i, j, c, pix);
+ }
+ }
+}
+
+void translate_image_channel(dn_image im, int c, float v)
+{
+ int i, j;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float pix = get_pixel(im, i, j, c);
+ pix = pix+v;
+ set_pixel(im, i, j, c, pix);
+ }
+ }
+}
+
+dn_image binarize_image(dn_image im)
+{
+ dn_image c = copy_image(im);
+ int i;
+ for(i = 0; i < im.w * im.h * im.c; ++i){
+ if(c.data[i] > .5) c.data[i] = 1;
+ else c.data[i] = 0;
+ }
+ return c;
+}
+
+void saturate_image(dn_image im, float sat)
{
- int i, j;
- for(j = 0; j < im.h; ++j){
- for(i = 0; i < im.w; ++i){
- float pix = get_pixel(im, i, j, c);
- pix = pix+v;
- set_pixel(im, i, j, c, pix);
- }
- }
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
+void hue_image(dn_image im, float hue)
+{
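+    /* shift the hue channel by 'hue' and wrap values back into [0, 1] */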
+ rgb_to_hsv(im);
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i] = im.data[i] + hue;
+ if (im.data[i] > 1) im.data[i] -= 1;
+ if (im.data[i] < 0) im.data[i] += 1;
+ }
+ hsv_to_rgb(im);
+ constrain_image(im);
}
-image binarize_image(image im)
+void exposure_image(dn_image im, float sat)
{
- image c = copy_image(im);
- int i;
- for(i = 0; i < im.w * im.h * im.c; ++i){
- if(c.data[i] > .5) c.data[i] = 1;
- else c.data[i] = 0;
- }
- return c;
+ rgb_to_hsv(im);
+ scale_image_channel(im, 2, sat);
+ hsv_to_rgb(im);
+ constrain_image(im);
}
-void saturate_image(image im, float sat)
+void distort_image(dn_image im, float hue, float sat, float val)
{
- rgb_to_hsv(im);
- scale_image_channel(im, 1, sat);
- hsv_to_rgb(im);
- constrain_image(im);
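+    /* scale saturation and value and shift hue in HSV space, then convert back and clamp */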
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ scale_image_channel(im, 2, val);
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i] = im.data[i] + hue;
+ if (im.data[i] > 1) im.data[i] -= 1;
+ if (im.data[i] < 0) im.data[i] += 1;
+ }
+ hsv_to_rgb(im);
+ constrain_image(im);
}
-void hue_image(image im, float hue)
+void random_distort_image(dn_image im, float hue, float saturation, float exposure)
{
- rgb_to_hsv(im);
- int i;
- for(i = 0; i < im.w*im.h; ++i){
- im.data[i] = im.data[i] + hue;
- if (im.data[i] > 1) im.data[i] -= 1;
- if (im.data[i] < 0) im.data[i] += 1;
- }
- hsv_to_rgb(im);
- constrain_image(im);
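+    /* sample a random hue shift and random saturation/exposure scales, then apply them */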
+ float dhue = rand_uniform(-hue, hue);
+ float dsat = rand_scale(saturation);
+ float dexp = rand_scale(exposure);
+ distort_image(im, dhue, dsat, dexp);
}
-void exposure_image(image im, float sat)
+void saturate_exposure_image(dn_image im, float sat, float exposure)
{
- rgb_to_hsv(im);
- scale_image_channel(im, 2, sat);
- hsv_to_rgb(im);
- constrain_image(im);
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ scale_image_channel(im, 2, exposure);
+ hsv_to_rgb(im);
+ constrain_image(im);
}
-void distort_image(image im, float hue, float sat, float val)
+dn_image resize_image(dn_image im, int w, int h)
{
- rgb_to_hsv(im);
- scale_image_channel(im, 1, sat);
- scale_image_channel(im, 2, val);
- int i;
- for(i = 0; i < im.w*im.h; ++i){
- im.data[i] = im.data[i] + hue;
- if (im.data[i] > 1) im.data[i] -= 1;
- if (im.data[i] < 0) im.data[i] += 1;
- }
- hsv_to_rgb(im);
- constrain_image(im);
-}
-
-void random_distort_image(image im, float hue, float saturation, float exposure)
-{
- float dhue = rand_uniform(-hue, hue);
- float dsat = rand_scale(saturation);
- float dexp = rand_scale(exposure);
- distort_image(im, dhue, dsat, dexp);
-}
-
-void saturate_exposure_image(image im, float sat, float exposure)
-{
- rgb_to_hsv(im);
- scale_image_channel(im, 1, sat);
- scale_image_channel(im, 2, exposure);
- hsv_to_rgb(im);
- constrain_image(im);
-}
-
-image resize_image(image im, int w, int h)
-{
- image resized = make_image(w, h, im.c);
- image part = make_image(w, im.h, im.c);
- int r, c, k;
- float w_scale = (float)(im.w - 1) / (w - 1);
- float h_scale = (float)(im.h - 1) / (h - 1);
- for(k = 0; k < im.c; ++k){
- for(r = 0; r < im.h; ++r){
- for(c = 0; c < w; ++c){
- float val = 0;
- if(c == w-1 || im.w == 1){
- val = get_pixel(im, im.w-1, r, k);
- } else {
- float sx = c*w_scale;
- int ix = (int) sx;
- float dx = sx - ix;
- val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
- }
- set_pixel(part, c, r, k, val);
- }
- }
- }
- for(k = 0; k < im.c; ++k){
- for(r = 0; r < h; ++r){
- float sy = r*h_scale;
- int iy = (int) sy;
- float dy = sy - iy;
- for(c = 0; c < w; ++c){
- float val = (1-dy) * get_pixel(part, c, iy, k);
- set_pixel(resized, c, r, k, val);
- }
- if(r == h-1 || im.h == 1) continue;
- for(c = 0; c < w; ++c){
- float val = dy * get_pixel(part, c, iy+1, k);
- add_pixel(resized, c, r, k, val);
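+    /* bilinear resize in two passes: horizontally into the intermediate 'part', then vertically into 'resized' */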
+ dn_image resized = make_image(w, h, im.c);
+ dn_image part = make_image(w, im.h, im.c);
+ int r, c, k;
+ float w_scale = (float)(im.w - 1) / (w - 1);
+ float h_scale = (float)(im.h - 1) / (h - 1);
+ for(k = 0; k < im.c; ++k){
+ for(r = 0; r < im.h; ++r){
+ for(c = 0; c < w; ++c){
+ float val = 0;
+ if(c == w-1 || im.w == 1){
+ val = get_pixel(im, im.w-1, r, k);
+ } else {
+ float sx = c*w_scale;
+ int ix = (int) sx;
+ float dx = sx - ix;
+ val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
}
- }
- }
-
- free_image(part);
- return resized;
-}
-
-
-void test_resize(char *filename)
-{
- image im = load_image(filename, 0,0, 3);
- float mag = mag_array(im.data, im.w*im.h*im.c);
- printf("L2 Norm: %f\n", mag);
- image gray = grayscale_image(im);
-
- image c1 = copy_image(im);
- image c2 = copy_image(im);
- image c3 = copy_image(im);
- image c4 = copy_image(im);
- distort_image(c1, .1, 1.5, 1.5);
- distort_image(c2, -.1, .66666, .66666);
- distort_image(c3, .1, 1.5, .66666);
- distort_image(c4, .1, .66666, 1.5);
-
-
- show_image(im, "Original", 1);
- show_image(gray, "Gray", 1);
- show_image(c1, "C1", 1);
- show_image(c2, "C2", 1);
- show_image(c3, "C3", 1);
- show_image(c4, "C4", 1);
+ set_pixel(part, c, r, k, val);
+ }
+ }
+ }
+ for(k = 0; k < im.c; ++k){
+ for(r = 0; r < h; ++r){
+ float sy = r*h_scale;
+ int iy = (int) sy;
+ float dy = sy - iy;
+ for(c = 0; c < w; ++c){
+ float val = (1-dy) * get_pixel(part, c, iy, k);
+ set_pixel(resized, c, r, k, val);
+ }
+ if(r == h-1 || im.h == 1) continue;
+ for(c = 0; c < w; ++c){
+ float val = dy * get_pixel(part, c, iy+1, k);
+ add_pixel(resized, c, r, k, val);
+ }
+ }
+ }
+
+ free_image(part);
+ return resized;
+}
+
+
+void test_resize(const char *filename)
+{
+ dn_image im = load_image(filename, 0,0, 3);
+ float mag = mag_array(im.data, im.w*im.h*im.c);
+ printf("L2 Norm: %f\n", mag);
+ dn_image gray = grayscale_image(im);
+
+ dn_image c1 = copy_image(im);
+ dn_image c2 = copy_image(im);
+ dn_image c3 = copy_image(im);
+ dn_image c4 = copy_image(im);
+ distort_image(c1, .1, 1.5, 1.5);
+ distort_image(c2, -.1, .66666, .66666);
+ distort_image(c3, .1, 1.5, .66666);
+ distort_image(c4, .1, .66666, 1.5);
+
+
+ show_image(im, "Original", 1);
+ show_image(gray, "Gray", 1);
+ show_image(c1, "C1", 1);
+ show_image(c2, "C2", 1);
+ show_image(c3, "C3", 1);
+ show_image(c4, "C4", 1);
#ifdef OPENCV
- while(1){
+ while(1){
image aug = random_augment_image(im, 0, .75, 320, 448, 320, 320);
show_image(aug, "aug", 1);
free_image(aug);
@@ -1290,177 +1290,186 @@ void test_resize(char *filename)
}
-image load_image_stb(char *filename, int channels)
-{
- int w, h, c;
- unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
- if (!data) {
- fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
- exit(0);
- }
- if(channels) c = channels;
- int i,j,k;
- image im = make_image(w, h, c);
- for(k = 0; k < c; ++k){
- for(j = 0; j < h; ++j){
- for(i = 0; i < w; ++i){
- int dst_index = i + w*j + w*h*k;
- int src_index = k + c*i + c*w*j;
- im.data[dst_index] = (float)data[src_index]/255.;
- }
- }
- }
- free(data);
- return im;
-}
-
-image load_image(char *filename, int w, int h, int c)
+dn_image load_image_stb(const char *filename, int channels)
+{
+ int w, h, c;
+ void* data;
+ int is_16bit = stbi_is_16_bit(filename);
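+    /* 16-bit images are loaded at full depth and normalized by 65535 below; 8-bit images by 255 */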
+ if (is_16bit)
+ data = stbi_load_16(filename, &w, &h, &c, channels);
+ else
+ data = stbi_load(filename, &w, &h, &c, channels);
+ if (!data) {
+ fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
+ exit(0);
+ }
+ if(channels)
+ c = channels;
+ int i, j, k, dst_index, src_index;
+ dn_image im = make_image(w, h, c);
+ for(k = 0; k < c; ++k){
+ for(j = 0; j < h; ++j){
+ for(i = 0; i < w; ++i){
+ dst_index = i + w*j + w*h*k;
+ src_index = k + c*i + c*w*j;
+ if (is_16bit)
+ im.data[dst_index] = (float) ((unsigned short*)data)[src_index] / 65535.0;
+ else
+ im.data[dst_index] = (float) ((unsigned char*)data)[src_index] / 255.0;
+ }
+ }
+ }
+ free(data);
+ return im;
+}
+
+dn_image load_image(const char *filename, int w, int h, int c)
{
#ifdef OPENCV
- image out = load_image_cv(filename, c);
+ image out = load_image_cv(filename, c);
#else
- image out = load_image_stb(filename, c);
+ dn_image out = load_image_stb(filename, c);
#endif
- if((h && w) && (h != out.h || w != out.w)){
- image resized = resize_image(out, w, h);
- free_image(out);
- out = resized;
- }
- return out;
-}
-
-image load_image_color(char *filename, int w, int h)
-{
- return load_image(filename, w, h, 3);
-}
-
-image get_image_layer(image m, int l)
-{
- image out = make_image(m.w, m.h, 1);
- int i;
- for(i = 0; i < m.h*m.w; ++i){
- out.data[i] = m.data[i+l*m.h*m.w];
- }
- return out;
-}
-void print_image(image m)
-{
- int i, j, k;
- for(i =0 ; i < m.c; ++i){
- for(j =0 ; j < m.h; ++j){
- for(k = 0; k < m.w; ++k){
- printf("%.2lf, ", m.data[i*m.h*m.w + j*m.w + k]);
- if(k > 30) break;
- }
- printf("\n");
- if(j > 30) break;
- }
- printf("\n");
- }
- printf("\n");
-}
-
-image collapse_images_vert(image *ims, int n)
-{
- int color = 1;
- int border = 1;
- int h,w,c;
- w = ims[0].w;
- h = (ims[0].h + border) * n - border;
- c = ims[0].c;
- if(c != 3 || !color){
- w = (w+border)*c - border;
- c = 1;
- }
-
- image filters = make_image(w, h, c);
- int i,j;
- for(i = 0; i < n; ++i){
- int h_offset = i*(ims[0].h+border);
- image copy = copy_image(ims[i]);
- //normalize_image(copy);
- if(c == 3 && color){
- embed_image(copy, filters, 0, h_offset);
- }
- else{
- for(j = 0; j < copy.c; ++j){
- int w_offset = j*(ims[0].w+border);
- image layer = get_image_layer(copy, j);
- embed_image(layer, filters, w_offset, h_offset);
- free_image(layer);
- }
- }
- free_image(copy);
- }
- return filters;
-}
-
-image collapse_images_horz(image *ims, int n)
-{
- int color = 1;
- int border = 1;
- int h,w,c;
- int size = ims[0].h;
- h = size;
- w = (ims[0].w + border) * n - border;
- c = ims[0].c;
- if(c != 3 || !color){
- h = (h+border)*c - border;
- c = 1;
- }
-
- image filters = make_image(w, h, c);
- int i,j;
- for(i = 0; i < n; ++i){
- int w_offset = i*(size+border);
- image copy = copy_image(ims[i]);
- //normalize_image(copy);
- if(c == 3 && color){
- embed_image(copy, filters, w_offset, 0);
- }
- else{
- for(j = 0; j < copy.c; ++j){
- int h_offset = j*(size+border);
- image layer = get_image_layer(copy, j);
- embed_image(layer, filters, w_offset, h_offset);
- free_image(layer);
- }
- }
- free_image(copy);
- }
- return filters;
-}
-
-void show_image_normalized(image im, const char *name)
-{
- image c = copy_image(im);
- normalize_image(c);
- show_image(c, name, 1);
- free_image(c);
-}
-
-void show_images(image *ims, int n, char *window)
-{
- image m = collapse_images_vert(ims, n);
- /*
- int w = 448;
- int h = ((float)m.h/m.w) * 448;
- if(h > 896){
- h = 896;
- w = ((float)m.w/m.h) * 896;
- }
- image sized = resize_image(m, w, h);
- */
- normalize_image(m);
- save_image(m, window);
- show_image(m, window, 1);
- free_image(m);
+ if((h && w) && (h != out.h || w != out.w)){
+ dn_image resized = resize_image(out, w, h);
+ free_image(out);
+ out = resized;
+ }
+ return out;
+}
+
+dn_image load_image_color(const char *filename, int w, int h)
+{
+ return load_image(filename, w, h, 3);
+}
+
+dn_image get_image_layer(dn_image m, int l)
+{
+ dn_image out = make_image(m.w, m.h, 1);
+ int i;
+ for(i = 0; i < m.h*m.w; ++i){
+ out.data[i] = m.data[i+l*m.h*m.w];
+ }
+ return out;
+}
+void print_image(dn_image m)
+{
+ int i, j, k;
+ for(i =0 ; i < m.c; ++i){
+ for(j =0 ; j < m.h; ++j){
+ for(k = 0; k < m.w; ++k){
+ printf("%.2lf, ", m.data[i*m.h*m.w + j*m.w + k]);
+ if(k > 30) break;
+ }
+ printf("\n");
+ if(j > 30) break;
+ }
+ printf("\n");
+ }
+ printf("\n");
+}
+
+dn_image collapse_images_vert(dn_image *ims, int n)
+{
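+    /* stack the n images vertically with a 1-pixel border; non-3-channel images are split into one grayscale tile per channel */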
+ int color = 1;
+ int border = 1;
+ int h,w,c;
+ w = ims[0].w;
+ h = (ims[0].h + border) * n - border;
+ c = ims[0].c;
+ if(c != 3 || !color){
+ w = (w+border)*c - border;
+ c = 1;
+ }
+
+ dn_image filters = make_image(w, h, c);
+ int i,j;
+ for(i = 0; i < n; ++i){
+ int h_offset = i*(ims[0].h+border);
+ dn_image copy = copy_image(ims[i]);
+ //normalize_image(copy);
+ if(c == 3 && color){
+ embed_image(copy, filters, 0, h_offset);
+ }
+ else{
+ for(j = 0; j < copy.c; ++j){
+ int w_offset = j*(ims[0].w+border);
+ dn_image layer = get_image_layer(copy, j);
+ embed_image(layer, filters, w_offset, h_offset);
+ free_image(layer);
+ }
+ }
+ free_image(copy);
+ }
+ return filters;
+}
+
+dn_image collapse_images_horz(dn_image *ims, int n)
+{
+ int color = 1;
+ int border = 1;
+ int h,w,c;
+ int size = ims[0].h;
+ h = size;
+ w = (ims[0].w + border) * n - border;
+ c = ims[0].c;
+ if(c != 3 || !color){
+ h = (h+border)*c - border;
+ c = 1;
+ }
+
+ dn_image filters = make_image(w, h, c);
+ int i,j;
+ for(i = 0; i < n; ++i){
+ int w_offset = i*(size+border);
+ dn_image copy = copy_image(ims[i]);
+ //normalize_image(copy);
+ if(c == 3 && color){
+ embed_image(copy, filters, w_offset, 0);
+ }
+ else{
+ for(j = 0; j < copy.c; ++j){
+ int h_offset = j*(size+border);
+ dn_image layer = get_image_layer(copy, j);
+ embed_image(layer, filters, w_offset, h_offset);
+ free_image(layer);
+ }
+ }
+ free_image(copy);
+ }
+ return filters;
+}
+
+void show_image_normalized(dn_image im, const char *name)
+{
+ dn_image c = copy_image(im);
+ normalize_image(c);
+ show_image(c, name, 1);
+ free_image(c);
+}
+
+void show_images(dn_image *ims, int n, char *window)
+{
+ dn_image m = collapse_images_vert(ims, n);
+ /*
+ int w = 448;
+ int h = ((float)m.h/m.w) * 448;
+ if(h > 896){
+ h = 896;
+ w = ((float)m.w/m.h) * 896;
+ }
+ image sized = resize_image(m, w, h);
+ */
+ normalize_image(m);
+ save_image(m, window);
+ show_image(m, window, 1);
+ free_image(m);
}
-void free_image(image m)
+void free_image(dn_image m)
{
- if(m.data){
- free(m.data);
- }
+ if(m.data){
+ free(m.data);
+ }
}
diff --git a/src/image.h b/src/image.h
index 3392bb9787f..8ed12a5b802 100644
--- a/src/image.h
+++ b/src/image.h
@@ -16,50 +16,50 @@ extern "C" {
#ifdef OPENCV
void *open_video_stream(const char *f, int c, int w, int h, int fps);
image get_image_from_stream(void *p);
-image load_image_cv(char *filename, int channels);
+image load_image_cv(const char *filename, int channels);
int show_image_cv(image im, const char* name, int ms);
#endif
float get_color(int c, int x, int max);
-void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b);
-void draw_bbox(image a, box bbox, int w, float r, float g, float b);
-void write_label(image a, int r, int c, image *characters, char *string, float *rgb);
-image image_distance(image a, image b);
-void scale_image(image m, float s);
-image rotate_crop_image(image im, float rad, float s, int w, int h, float dx, float dy, float aspect);
-image random_crop_image(image im, int w, int h);
-image random_augment_image(image im, float angle, float aspect, int low, int high, int w, int h);
-augment_args random_augment_args(image im, float angle, float aspect, int low, int high, int w, int h);
-void letterbox_image_into(image im, int w, int h, image boxed);
-image resize_max(image im, int max);
-void translate_image(image m, float s);
-void embed_image(image source, image dest, int dx, int dy);
-void place_image(image im, int w, int h, int dx, int dy, image canvas);
-void saturate_image(image im, float sat);
-void exposure_image(image im, float sat);
-void distort_image(image im, float hue, float sat, float val);
-void saturate_exposure_image(image im, float sat, float exposure);
-void rgb_to_hsv(image im);
-void hsv_to_rgb(image im);
-void yuv_to_rgb(image im);
-void rgb_to_yuv(image im);
+void draw_box(dn_image a, int x1, int y1, int x2, int y2, float r, float g, float b);
+void draw_bbox(dn_image a, dn_box bbox, int w, float r, float g, float b);
+void write_label(dn_image a, int r, int c, dn_image *characters, char *string, float *rgb);
+dn_image image_distance(dn_image a, dn_image b);
+void scale_image(dn_image m, float s);
+dn_image rotate_crop_image(dn_image im, float rad, float s, int w, int h, float dx, float dy, float aspect);
+dn_image random_crop_image(dn_image im, int w, int h);
+dn_image random_augment_image(dn_image im, float angle, float aspect, int low, int high, int w, int h);
+augment_args random_augment_args(dn_image im, float angle, float aspect, int low, int high, int w, int h);
+void letterbox_image_into(dn_image im, int w, int h, dn_image boxed);
+dn_image resize_max(dn_image im, int max);
+void translate_image(dn_image m, float s);
+void embed_image(dn_image source, dn_image dest, int dx, int dy);
+void place_image(dn_image im, int w, int h, int dx, int dy, dn_image canvas);
+void saturate_image(dn_image im, float sat);
+void exposure_image(dn_image im, float sat);
+void distort_image(dn_image im, float hue, float sat, float val);
+void saturate_exposure_image(dn_image im, float sat, float exposure);
+void rgb_to_hsv(dn_image im);
+void hsv_to_rgb(dn_image im);
+void yuv_to_rgb(dn_image im);
+void rgb_to_yuv(dn_image im);
-image collapse_image_layers(image source, int border);
-image collapse_images_horz(image *ims, int n);
-image collapse_images_vert(image *ims, int n);
+dn_image collapse_image_layers(dn_image source, int border);
+dn_image collapse_images_horz(dn_image *ims, int n);
+dn_image collapse_images_vert(dn_image *ims, int n);
-void show_image_normalized(image im, const char *name);
-void show_images(image *ims, int n, char *window);
-void show_image_layers(image p, char *name);
-void show_image_collapsed(image p, char *name);
+void show_image_normalized(dn_image im, const char *name);
+void show_images(dn_image *ims, int n, char *window);
+void show_image_layers(dn_image p, char *name);
+void show_image_collapsed(dn_image p, char *name);
-void print_image(image m);
+void print_image(dn_image m);
-image make_empty_image(int w, int h, int c);
-void copy_image_into(image src, image dest);
+dn_image make_empty_image(int w, int h, int c);
+void copy_image_into(dn_image src, dn_image dest);
-image get_image_layer(image m, int l);
+dn_image get_image_layer(dn_image m, int l);
#ifdef __cplusplus
}
diff --git a/src/image_opencv.cpp b/src/image_opencv.cpp
index 7511280be07..674e78740c5 100644
--- a/src/image_opencv.cpp
+++ b/src/image_opencv.cpp
@@ -87,7 +87,7 @@ image get_image_from_stream(void *p)
return mat_to_image(m);
}
-image load_image_cv(char *filename, int channels)
+image load_image_cv(const char *filename, int channels)
{
int flag = -1;
if (channels == 0) flag = -1;
diff --git a/src/iseg_layer.c b/src/iseg_layer.c
index 2bf03a8ac2f..0a49cb74646 100644
--- a/src/iseg_layer.c
+++ b/src/iseg_layer.c
@@ -10,9 +10,9 @@
#include
#include
-layer make_iseg_layer(int batch, int w, int h, int classes, int ids)
+dn_layer make_iseg_layer(int batch, int w, int h, int classes, int ids)
{
- layer l = {0};
+ dn_layer l = {0};
l.type = ISEG;
l.h = h;
@@ -55,7 +55,7 @@ layer make_iseg_layer(int batch, int w, int h, int classes, int ids)
return l;
}
-void resize_iseg_layer(layer *l, int w, int h)
+void resize_iseg_layer(dn_layer *l, int w, int h)
{
l->w = w;
l->h = h;
@@ -75,7 +75,7 @@ void resize_iseg_layer(layer *l, int w, int h)
#endif
}
-void forward_iseg_layer(const layer l, network net)
+void forward_iseg_layer(const dn_layer l, dn_network net)
{
double time = what_time_is_it_now();
@@ -192,7 +192,7 @@ void forward_iseg_layer(const layer l, network net)
printf("took %lf sec\n", what_time_is_it_now() - time);
}
-void backward_iseg_layer(const layer l, network net)
+void backward_iseg_layer(const dn_layer l, dn_network net)
{
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
}
diff --git a/src/iseg_layer.h b/src/iseg_layer.h
index dd8e64e023c..b43bef4cf95 100644
--- a/src/iseg_layer.h
+++ b/src/iseg_layer.h
@@ -5,11 +5,11 @@
#include "layer.h"
#include "network.h"
-layer make_iseg_layer(int batch, int w, int h, int classes, int ids);
-void forward_iseg_layer(const layer l, network net);
-void backward_iseg_layer(const layer l, network net);
-void resize_iseg_layer(layer *l, int w, int h);
-int iseg_num_detections(layer l, float thresh);
+dn_layer make_iseg_layer(int batch, int w, int h, int classes, int ids);
+void forward_iseg_layer(const dn_layer l, dn_network net);
+void backward_iseg_layer(const dn_layer l, dn_network net);
+void resize_iseg_layer(dn_layer *l, int w, int h);
+int iseg_num_detections(dn_layer l, float thresh);
#ifdef GPU
void forward_iseg_layer_gpu(const layer l, network net);
diff --git a/src/l2norm_layer.c b/src/l2norm_layer.c
index d099479b4c0..cdb13485a97 100644
--- a/src/l2norm_layer.c
+++ b/src/l2norm_layer.c
@@ -9,10 +9,10 @@
#include
#include
-layer make_l2norm_layer(int batch, int inputs)
+dn_layer make_l2norm_layer(int batch, int inputs)
{
fprintf(stderr, "l2norm %4d\n", inputs);
- layer l = {0};
+ dn_layer l = {0};
l.type = L2NORM;
l.batch = batch;
l.inputs = inputs;
@@ -34,13 +34,13 @@ layer make_l2norm_layer(int batch, int inputs)
return l;
}
-void forward_l2norm_layer(const layer l, network net)
+void forward_l2norm_layer(const dn_layer l, dn_network net)
{
copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
l2normalize_cpu(l.output, l.scales, l.batch, l.out_c, l.out_w*l.out_h);
}
-void backward_l2norm_layer(const layer l, network net)
+void backward_l2norm_layer(const dn_layer l, dn_network net)
{
//axpy_cpu(l.inputs*l.batch, 1, l.scales, 1, l.delta, 1);
axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
diff --git a/src/l2norm_layer.h b/src/l2norm_layer.h
index 1ca6f710f01..02a9b768cfc 100644
--- a/src/l2norm_layer.h
+++ b/src/l2norm_layer.h
@@ -3,9 +3,9 @@
#include "layer.h"
#include "network.h"
-layer make_l2norm_layer(int batch, int inputs);
-void forward_l2norm_layer(const layer l, network net);
-void backward_l2norm_layer(const layer l, network net);
+dn_layer make_l2norm_layer(int batch, int inputs);
+void forward_l2norm_layer(const dn_layer l, dn_network net);
+void backward_l2norm_layer(const dn_layer l, dn_network net);
#ifdef GPU
void forward_l2norm_layer_gpu(const layer l, network net);
diff --git a/src/layer.c b/src/layer.c
index c27b4776421..98bd2d8d9cc 100644
--- a/src/layer.c
+++ b/src/layer.c
@@ -3,7 +3,7 @@
#include <stdlib.h>
-void free_layer(layer l)
+void free_layer(dn_layer l)
{
if(l.type == DROPOUT){
if(l.rand) free(l.rand);
diff --git a/src/list.c b/src/list.c
index 0e4165d3780..749751fb6df 100644
--- a/src/list.c
+++ b/src/list.c
@@ -2,9 +2,9 @@
#include <string.h>
#include "list.h"
-list *make_list()
+dn_list *make_list()
{
- list *l = malloc(sizeof(list));
+ dn_list *l = malloc(sizeof(dn_list));
l->size = 0;
l->front = 0;
l->back = 0;
@@ -25,9 +25,9 @@ void transfer_node(list *s, list *d, node *n)
}
*/
-void *list_pop(list *l){
+void *list_pop(dn_list *l){
if(!l->back) return 0;
- node *b = l->back;
+ dn_node *b = l->back;
void *val = b->val;
l->back = b->prev;
if(l->back) l->back->next = 0;
@@ -37,9 +37,9 @@ void *list_pop(list *l){
return val;
}
-void list_insert(list *l, void *val)
+void list_insert(dn_list *l, void *val)
{
- node *new = malloc(sizeof(node));
+ dn_node *new = malloc(sizeof(dn_node));
new->val = val;
new->next = 0;
@@ -54,9 +54,9 @@ void list_insert(list *l, void *val)
++l->size;
}
-void free_node(node *n)
+void free_node(dn_node *n)
{
- node *next;
+ dn_node *next;
while(n) {
next = n->next;
free(n);
@@ -64,26 +64,26 @@ void free_node(node *n)
}
}
-void free_list(list *l)
+void free_list(dn_list *l)
{
free_node(l->front);
free(l);
}
-void free_list_contents(list *l)
+void free_list_contents(dn_list *l)
{
- node *n = l->front;
+ dn_node *n = l->front;
while(n){
free(n->val);
n = n->next;
}
}
-void **list_to_array(list *l)
+void **list_to_array(dn_list *l)
{
void **a = calloc(l->size, sizeof(void*));
int count = 0;
- node *n = l->front;
+ dn_node *n = l->front;
while(n){
a[count++] = n->val;
n = n->next;
diff --git a/src/list.h b/src/list.h
index 6b445c717c2..fc1844b0a69 100644
--- a/src/list.h
+++ b/src/list.h
@@ -2,12 +2,12 @@
#define LIST_H
#include "darknet.h"
-list *make_list();
-int list_find(list *l, void *val);
+dn_list *make_list();
+int list_find(dn_list *l, void *val);
-void list_insert(list *, void *);
+void list_insert(dn_list *, void *);
-void free_list_contents(list *l);
+void free_list_contents(dn_list *l);
#endif
diff --git a/src/local_layer.c b/src/local_layer.c
index 74f6910a8fd..6b51047b606 100644
--- a/src/local_layer.c
+++ b/src/local_layer.c
@@ -88,7 +88,7 @@ local_layer make_local_layer(int batch, int h, int w, int c, int n, int size, in
return l;
}
-void forward_local_layer(const local_layer l, network net)
+void forward_local_layer(const local_layer l, dn_network net)
{
int out_h = local_out_height(l);
int out_w = local_out_width(l);
@@ -119,7 +119,7 @@ void forward_local_layer(const local_layer l, network net)
activate_array(l.output, l.outputs*l.batch, l.activation);
}
-void backward_local_layer(local_layer l, network net)
+void backward_local_layer(local_layer l, dn_network net)
{
int i, j;
int locations = l.out_w*l.out_h;
diff --git a/src/local_layer.h b/src/local_layer.h
index 776e572f420..88d17c24bd9 100644
--- a/src/local_layer.h
+++ b/src/local_layer.h
@@ -7,7 +7,7 @@
#include "layer.h"
#include "network.h"
-typedef layer local_layer;
+typedef dn_layer local_layer;
#ifdef GPU
void forward_local_layer_gpu(local_layer layer, network net);
@@ -20,8 +20,8 @@ void pull_local_layer(local_layer layer);
local_layer make_local_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation);
-void forward_local_layer(const local_layer layer, network net);
-void backward_local_layer(local_layer layer, network net);
+void forward_local_layer(const local_layer layer, dn_network net);
+void backward_local_layer(local_layer layer, dn_network net);
void update_local_layer(local_layer layer, update_args a);
void bias_output(float *output, float *biases, int batch, int n, int size);
diff --git a/src/logistic_layer.c b/src/logistic_layer.c
index b2b3d6b1ccf..b56a6d9c77b 100644
--- a/src/logistic_layer.c
+++ b/src/logistic_layer.c
@@ -9,10 +9,10 @@
#include
#include
-layer make_logistic_layer(int batch, int inputs)
+dn_layer make_logistic_layer(int batch, int inputs)
{
fprintf(stderr, "logistic x entropy %4d\n", inputs);
- layer l = {0};
+ dn_layer l = {0};
l.type = LOGXENT;
l.batch = batch;
l.inputs = inputs;
@@ -35,7 +35,7 @@ layer make_logistic_layer(int batch, int inputs)
return l;
}
-void forward_logistic_layer(const layer l, network net)
+void forward_logistic_layer(const dn_layer l, dn_network net)
{
copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
activate_array(l.output, l.outputs*l.batch, LOGISTIC);
@@ -45,7 +45,7 @@ void forward_logistic_layer(const layer l, network net)
}
}
-void backward_logistic_layer(const layer l, network net)
+void backward_logistic_layer(const dn_layer l, dn_network net)
{
axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
}
diff --git a/src/logistic_layer.h b/src/logistic_layer.h
index 9c25bee3c2a..1d766886220 100644
--- a/src/logistic_layer.h
+++ b/src/logistic_layer.h
@@ -3,9 +3,9 @@
#include "layer.h"
#include "network.h"
-layer make_logistic_layer(int batch, int inputs);
-void forward_logistic_layer(const layer l, network net);
-void backward_logistic_layer(const layer l, network net);
+dn_layer make_logistic_layer(int batch, int inputs);
+void forward_logistic_layer(const dn_layer l, dn_network net);
+void backward_logistic_layer(const dn_layer l, dn_network net);
#ifdef GPU
void forward_logistic_layer_gpu(const layer l, network net);
diff --git a/src/lstm_layer.c b/src/lstm_layer.c
index fb07de20228..76587e8b8af 100644
--- a/src/lstm_layer.c
+++ b/src/lstm_layer.c
@@ -10,7 +10,7 @@
#include
#include
-static void increment_layer(layer *l, int steps)
+static void increment_layer(dn_layer *l, int steps)
{
int num = l->outputs*l->batch*steps;
l->output += num;
@@ -26,52 +26,52 @@ static void increment_layer(layer *l, int steps)
#endif
}
-layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
+dn_layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
{
fprintf(stderr, "LSTM Layer: %d inputs, %d outputs\n", inputs, outputs);
batch = batch / steps;
- layer l = { 0 };
+ dn_layer l = { 0 };
l.batch = batch;
l.type = LSTM;
l.steps = steps;
l.inputs = inputs;
- l.uf = malloc(sizeof(layer));
+ l.uf = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.uf) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.uf->batch = batch;
- l.ui = malloc(sizeof(layer));
+ l.ui = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.ui) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.ui->batch = batch;
- l.ug = malloc(sizeof(layer));
+ l.ug = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.ug) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.ug->batch = batch;
- l.uo = malloc(sizeof(layer));
+ l.uo = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.uo) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
l.uo->batch = batch;
- l.wf = malloc(sizeof(layer));
+ l.wf = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wf) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wf->batch = batch;
- l.wi = malloc(sizeof(layer));
+ l.wi = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wi) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wi->batch = batch;
- l.wg = malloc(sizeof(layer));
+ l.wg = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wg) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wg->batch = batch;
- l.wo = malloc(sizeof(layer));
+ l.wo = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.wo) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
l.wo->batch = batch;
@@ -141,7 +141,7 @@ layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_n
return l;
}
-void update_lstm_layer(layer l, update_args a)
+void update_lstm_layer(dn_layer l, update_args a)
{
update_connected_layer(*(l.wf), a);
update_connected_layer(*(l.wi), a);
@@ -153,20 +153,20 @@ void update_lstm_layer(layer l, update_args a)
update_connected_layer(*(l.uo), a);
}
-void forward_lstm_layer(layer l, network state)
+void forward_lstm_layer(dn_layer l, dn_network state)
{
- network s = { 0 };
+ dn_network s = { 0 };
s.train = state.train;
int i;
- layer wf = *(l.wf);
- layer wi = *(l.wi);
- layer wg = *(l.wg);
- layer wo = *(l.wo);
+ dn_layer wf = *(l.wf);
+ dn_layer wi = *(l.wi);
+ dn_layer wg = *(l.wg);
+ dn_layer wo = *(l.wo);
- layer uf = *(l.uf);
- layer ui = *(l.ui);
- layer ug = *(l.ug);
- layer uo = *(l.uo);
+ dn_layer uf = *(l.uf);
+ dn_layer ui = *(l.ui);
+ dn_layer ug = *(l.ug);
+ dn_layer uo = *(l.uo);
fill_cpu(l.outputs * l.batch * l.steps, 0, wf.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, wi.delta, 1);
@@ -239,20 +239,20 @@ void forward_lstm_layer(layer l, network state)
}
}
-void backward_lstm_layer(layer l, network state)
+void backward_lstm_layer(dn_layer l, dn_network state)
{
- network s = { 0 };
+ dn_network s = { 0 };
s.train = state.train;
int i;
- layer wf = *(l.wf);
- layer wi = *(l.wi);
- layer wg = *(l.wg);
- layer wo = *(l.wo);
-
- layer uf = *(l.uf);
- layer ui = *(l.ui);
- layer ug = *(l.ug);
- layer uo = *(l.uo);
+ dn_layer wf = *(l.wf);
+ dn_layer wi = *(l.wi);
+ dn_layer wg = *(l.wg);
+ dn_layer wo = *(l.wo);
+
+ dn_layer uf = *(l.uf);
+ dn_layer ui = *(l.ui);
+ dn_layer ug = *(l.ug);
+ dn_layer uo = *(l.uo);
increment_layer(&wf, l.steps - 1);
increment_layer(&wi, l.steps - 1);
diff --git a/src/lstm_layer.h b/src/lstm_layer.h
index b9f07e6424b..b6372d2cb64 100644
--- a/src/lstm_layer.h
+++ b/src/lstm_layer.h
@@ -6,10 +6,10 @@
#include "network.h"
#define USET
-layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
+dn_layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
-void forward_lstm_layer(layer l, network net);
-void update_lstm_layer(layer l, update_args a);
+void forward_lstm_layer(dn_layer l, dn_network net);
+void update_lstm_layer(dn_layer l, update_args a);
#ifdef GPU
void forward_lstm_layer_gpu(layer l, network net);
diff --git a/src/matrix.c b/src/matrix.c
index 799916bff01..3b26e2c7e7c 100644
--- a/src/matrix.c
+++ b/src/matrix.c
@@ -7,14 +7,14 @@
#include
#include
-void free_matrix(matrix m)
+void free_matrix(dn_matrix m)
{
int i;
for(i = 0; i < m.rows; ++i) free(m.vals[i]);
free(m.vals);
}
-float matrix_topk_accuracy(matrix truth, matrix guess, int k)
+float matrix_topk_accuracy(dn_matrix truth, dn_matrix guess, int k)
{
int *indexes = calloc(k, sizeof(int));
int n = truth.cols;
@@ -34,7 +34,7 @@ float matrix_topk_accuracy(matrix truth, matrix guess, int k)
return (float)correct/truth.rows;
}
-void scale_matrix(matrix m, float scale)
+void scale_matrix(dn_matrix m, float scale)
{
int i,j;
for(i = 0; i < m.rows; ++i){
@@ -44,7 +44,7 @@ void scale_matrix(matrix m, float scale)
}
}
-matrix resize_matrix(matrix m, int size)
+dn_matrix resize_matrix(dn_matrix m, int size)
{
int i;
if (m.rows == size) return m;
@@ -63,7 +63,7 @@ matrix resize_matrix(matrix m, int size)
return m;
}
-void matrix_add_matrix(matrix from, matrix to)
+void matrix_add_matrix(dn_matrix from, dn_matrix to)
{
assert(from.rows == to.rows && from.cols == to.cols);
int i,j;
@@ -74,9 +74,9 @@ void matrix_add_matrix(matrix from, matrix to)
}
}
-matrix copy_matrix(matrix m)
+dn_matrix copy_matrix(dn_matrix m)
{
- matrix c = {0};
+ dn_matrix c = {0};
c.rows = m.rows;
c.cols = m.cols;
c.vals = calloc(c.rows, sizeof(float *));
@@ -88,10 +88,10 @@ matrix copy_matrix(matrix m)
return c;
}
-matrix make_matrix(int rows, int cols)
+dn_matrix make_matrix(int rows, int cols)
{
int i;
- matrix m;
+ dn_matrix m;
m.rows = rows;
m.cols = cols;
m.vals = calloc(m.rows, sizeof(float *));
@@ -101,10 +101,10 @@ matrix make_matrix(int rows, int cols)
return m;
}
-matrix hold_out_matrix(matrix *m, int n)
+dn_matrix hold_out_matrix(dn_matrix *m, int n)
{
int i;
- matrix h;
+ dn_matrix h;
h.rows = n;
h.cols = m->cols;
h.vals = calloc(h.rows, sizeof(float *));
@@ -116,7 +116,7 @@ matrix hold_out_matrix(matrix *m, int n)
return h;
}
-float *pop_column(matrix *m, int c)
+float *pop_column(dn_matrix *m, int c)
{
float *col = calloc(m->rows, sizeof(float));
int i, j;
@@ -130,12 +130,12 @@ float *pop_column(matrix *m, int c)
return col;
}
-matrix csv_to_matrix(char *filename)
+dn_matrix csv_to_matrix(const char *filename)
{
FILE *fp = fopen(filename, "r");
if(!fp) file_error(filename);
- matrix m;
+ dn_matrix m;
m.cols = -1;
char *line;
@@ -158,7 +158,7 @@ matrix csv_to_matrix(char *filename)
return m;
}
-void matrix_to_csv(matrix m)
+void matrix_to_csv(dn_matrix m)
{
int i, j;
@@ -171,7 +171,7 @@ void matrix_to_csv(matrix m)
}
}
-void print_matrix(matrix m)
+void print_matrix(dn_matrix m)
{
int i, j;
printf("%d X %d Matrix:\n",m.rows, m.cols);
diff --git a/src/matrix.h b/src/matrix.h
index 879acd70d26..f30bb4a90e7 100644
--- a/src/matrix.h
+++ b/src/matrix.h
@@ -2,12 +2,12 @@
#define MATRIX_H
#include "darknet.h"
-matrix copy_matrix(matrix m);
-void print_matrix(matrix m);
+dn_matrix copy_matrix(dn_matrix m);
+void print_matrix(dn_matrix m);
-matrix hold_out_matrix(matrix *m, int n);
-matrix resize_matrix(matrix m, int size);
+dn_matrix hold_out_matrix(dn_matrix *m, int n);
+dn_matrix resize_matrix(dn_matrix m, int size);
-float *pop_column(matrix *m, int c);
+float *pop_column(dn_matrix *m, int c);
#endif
diff --git a/src/maxpool_layer.c b/src/maxpool_layer.c
index fb05635ea4f..76cf5e7531b 100644
--- a/src/maxpool_layer.c
+++ b/src/maxpool_layer.c
@@ -2,7 +2,7 @@
#include "cuda.h"
#include <stdio.h>
-image get_maxpool_image(maxpool_layer l)
+dn_image get_maxpool_image(maxpool_layer l)
{
int h = l.out_h;
int w = l.out_w;
@@ -10,7 +10,7 @@ image get_maxpool_image(maxpool_layer l)
return float_to_image(w,h,c,l.output);
}
-image get_maxpool_delta(maxpool_layer l)
+dn_image get_maxpool_delta(maxpool_layer l)
{
int h = l.out_h;
int w = l.out_w;
@@ -76,7 +76,7 @@ void resize_maxpool_layer(maxpool_layer *l, int w, int h)
#endif
}
-void forward_maxpool_layer(const maxpool_layer l, network net)
+void forward_maxpool_layer(const maxpool_layer l, dn_network net)
{
int b,i,j,k,m,n;
int w_offset = -l.pad/2;
@@ -113,7 +113,7 @@ void forward_maxpool_layer(const maxpool_layer l, network net)
}
}
-void backward_maxpool_layer(const maxpool_layer l, network net)
+void backward_maxpool_layer(const maxpool_layer l, dn_network net)
{
int i;
int h = l.out_h;
diff --git a/src/maxpool_layer.h b/src/maxpool_layer.h
index ceb5190716c..7a7f5587738 100644
--- a/src/maxpool_layer.h
+++ b/src/maxpool_layer.h
@@ -6,13 +6,13 @@
#include "layer.h"
#include "network.h"
-typedef layer maxpool_layer;
+typedef dn_layer maxpool_layer;
-image get_maxpool_image(maxpool_layer l);
+dn_image get_maxpool_image(maxpool_layer l);
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding);
void resize_maxpool_layer(maxpool_layer *l, int w, int h);
-void forward_maxpool_layer(const maxpool_layer l, network net);
-void backward_maxpool_layer(const maxpool_layer l, network net);
+void forward_maxpool_layer(const maxpool_layer l, dn_network net);
+void backward_maxpool_layer(const maxpool_layer l, dn_network net);
#ifdef GPU
void forward_maxpool_layer_gpu(maxpool_layer l, network net);
diff --git a/src/network.c b/src/network.c
index aaab7997b5e..76d5dd04cd0 100644
--- a/src/network.c
+++ b/src/network.c
@@ -32,9 +32,9 @@
#include "parser.h"
#include "data.h"
-load_args get_base_args(network *net)
+dn_load_args get_base_args(dn_network *net)
{
- load_args args = {0};
+ dn_load_args args = {0};
args.w = net->w;
args.h = net->h;
args.size = net->w;
@@ -50,9 +50,9 @@ load_args get_base_args(network *net)
return args;
}
-network *load_network(char *cfg, char *weights, int clear)
+dn_network *load_network(const char *cfg, const char *weights, int clear)
{
- network *net = parse_network_cfg(cfg);
+ dn_network *net = parse_network_cfg(cfg);
if(weights && weights[0] != 0){
load_weights(net, weights);
}
@@ -60,13 +60,13 @@ network *load_network(char *cfg, char *weights, int clear)
return net;
}
-size_t get_current_batch(network *net)
+size_t get_current_batch(dn_network *net)
{
size_t batch_num = (*net->seen)/(net->batch*net->subdivisions);
return batch_num;
}
-void reset_network_state(network *net, int b)
+void reset_network_state(dn_network *net, int b)
{
int i;
for (i = 0; i < net->n; ++i) {
@@ -82,12 +82,12 @@ void reset_network_state(network *net, int b)
}
}
-void reset_rnn(network *net)
+void reset_rnn(dn_network *net)
{
reset_network_state(net, 0);
}
-float get_current_rate(network *net)
+float get_current_rate(dn_network *net)
{
size_t batch_num = get_current_batch(net);
int i;
@@ -174,18 +174,18 @@ char *get_layer_string(LAYER_TYPE a)
return "none";
}
-network *make_network(int n)
+dn_network *make_network(int n)
{
- network *net = calloc(1, sizeof(network));
+ dn_network *net = calloc(1, sizeof(dn_network));
net->n = n;
- net->layers = calloc(net->n, sizeof(layer));
+ net->layers = calloc(net->n, sizeof(dn_layer));
net->seen = calloc(1, sizeof(size_t));
net->t = calloc(1, sizeof(int));
net->cost = calloc(1, sizeof(float));
return net;
}
-void forward_network(network *netp)
+void forward_network(dn_network *netp)
{
#ifdef GPU
if(netp->gpu_index >= 0){
@@ -193,11 +193,11 @@ void forward_network(network *netp)
return;
}
#endif
- network net = *netp;
+ dn_network net = *netp;
int i;
for(i = 0; i < net.n; ++i){
net.index = i;
- layer l = net.layers[i];
+ dn_layer l = net.layers[i];
if(l.delta){
fill_cpu(l.outputs * l.batch, 0, l.delta, 1);
}
@@ -210,7 +210,7 @@ void forward_network(network *netp)
calc_network_cost(netp);
}
-void update_network(network *netp)
+void update_network(dn_network *netp)
{
#ifdef GPU
if(netp->gpu_index >= 0){
@@ -218,7 +218,7 @@ void update_network(network *netp)
return;
}
#endif
- network net = *netp;
+ dn_network net = *netp;
int i;
update_args a = {0};
a.batch = net.batch*net.subdivisions;
@@ -233,16 +233,16 @@ void update_network(network *netp)
a.t = *net.t;
for(i = 0; i < net.n; ++i){
- layer l = net.layers[i];
+ dn_layer l = net.layers[i];
if(l.update){
l.update(l, a);
}
}
}
-void calc_network_cost(network *netp)
+void calc_network_cost(dn_network *netp)
{
- network net = *netp;
+ dn_network net = *netp;
int i;
float sum = 0;
int count = 0;
@@ -255,12 +255,12 @@ void calc_network_cost(network *netp)
*net.cost = sum/count;
}
-int get_predicted_class_network(network *net)
+int get_predicted_class_network(dn_network *net)
{
return max_index(net->output, net->outputs);
}
-void backward_network(network *netp)
+void backward_network(dn_network *netp)
{
#ifdef GPU
if(netp->gpu_index >= 0){
@@ -268,16 +268,16 @@ void backward_network(network *netp)
return;
}
#endif
- network net = *netp;
+ dn_network net = *netp;
int i;
- network orig = net;
+ dn_network orig = net;
for(i = net.n-1; i >= 0; --i){
- layer l = net.layers[i];
+ dn_layer l = net.layers[i];
if(l.stopbackward) break;
if(i == 0){
net = orig;
}else{
- layer prev = net.layers[i-1];
+ dn_layer prev = net.layers[i-1];
net.input = prev.output;
net.delta = prev.delta;
}
@@ -286,7 +286,7 @@ void backward_network(network *netp)
}
}
-float train_network_datum(network *net)
+float train_network_datum(dn_network *net)
{
*net->seen += net->batch;
net->train = 1;
@@ -297,7 +297,7 @@ float train_network_datum(network *net)
return error;
}
-float train_network_sgd(network *net, data d, int n)
+float train_network_sgd(dn_network *net, dn_data d, int n)
{
int batch = net->batch;
@@ -311,7 +311,7 @@ float train_network_sgd(network *net, data d, int n)
return (float)sum/(n*batch);
}
-float train_network(network *net, data d)
+float train_network(dn_network *net, dn_data d)
{
assert(d.X.rows % net->batch == 0);
int batch = net->batch;
@@ -327,7 +327,7 @@ float train_network(network *net, data d)
return (float)sum/(n*batch);
}
-void set_temp_network(network *net, float t)
+void set_temp_network(dn_network *net, float t)
{
int i;
for(i = 0; i < net->n; ++i){
@@ -336,7 +336,7 @@ void set_temp_network(network *net, float t)
}
-void set_batch_network(network *net, int b)
+void set_batch_network(dn_network *net, int b)
{
net->batch = b;
int i;
@@ -355,7 +355,7 @@ void set_batch_network(network *net, int b)
}
}
-int resize_network(network *net, int w, int h)
+int resize_network(dn_network *net, int w, int h)
{
#ifdef GPU
cuda_set_device(net->gpu_index);
@@ -370,7 +370,7 @@ int resize_network(network *net, int w, int h)
//fprintf(stderr, "Resizing to %d x %d...\n", w, h);
//fflush(stderr);
for (i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
resize_convolutional_layer(&l, w, h);
}else if(l.type == CROP){
@@ -406,7 +406,7 @@ int resize_network(network *net, int w, int h)
h = l.out_h;
if(l.type == AVGPOOL) break;
}
- layer out = get_network_output_layer(net);
+ dn_layer out = get_network_output_layer(net);
net->inputs = net->layers[0].inputs;
net->outputs = out.outputs;
net->truths = out.outputs;
@@ -437,7 +437,7 @@ int resize_network(network *net, int w, int h)
return 0;
}
-layer get_network_detection_layer(network *net)
+dn_layer get_network_detection_layer(dn_network *net)
{
int i;
for(i = 0; i < net->n; ++i){
@@ -446,57 +446,57 @@ layer get_network_detection_layer(network *net)
}
}
fprintf(stderr, "Detection layer not found!!\n");
- layer l = {0};
+ dn_layer l = {0};
return l;
}
-image get_network_image_layer(network *net, int i)
+dn_image get_network_image_layer(dn_network *net, int i)
{
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
#ifdef GPU
//cuda_pull_array(l.output_gpu, l.output, l.outputs);
#endif
if (l.out_w && l.out_h && l.out_c){
return float_to_image(l.out_w, l.out_h, l.out_c, l.output);
}
- image def = {0};
+ dn_image def = {0};
return def;
}
-image get_network_image(network *net)
+dn_image get_network_image(dn_network *net)
{
int i;
for(i = net->n-1; i >= 0; --i){
- image m = get_network_image_layer(net, i);
+ dn_image m = get_network_image_layer(net, i);
if(m.h != 0) return m;
}
- image def = {0};
+ dn_image def = {0};
return def;
}
-void visualize_network(network *net)
+void visualize_network(dn_network *net)
{
- image *prev = 0;
+ dn_image *prev = 0;
int i;
char buff[256];
for(i = 0; i < net->n; ++i){
sprintf(buff, "Layer %d", i);
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == CONVOLUTIONAL){
prev = visualize_convolutional_layer(l, buff, prev);
}
}
}
-void top_predictions(network *net, int k, int *index)
+void top_predictions(dn_network *net, int k, int *index)
{
top_k(net->output, net->outputs, k, index);
}
-float *network_predict(network *net, float *input)
+float *network_predict(dn_network *net, float *input)
{
- network orig = *net;
+ dn_network orig = *net;
net->input = input;
net->truth = 0;
net->train = 0;
@@ -507,12 +507,12 @@ float *network_predict(network *net, float *input)
return out;
}
-int num_detections(network *net, float thresh)
+int num_detections(dn_network *net, float thresh)
{
int i;
int s = 0;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if(l.type == YOLO){
s += yolo_num_detections(l, thresh);
}
@@ -523,9 +523,9 @@ int num_detections(network *net, float thresh)
return s;
}
-detection *make_network_boxes(network *net, float thresh, int *num)
+detection *make_network_boxes(dn_network *net, float thresh, int *num)
{
- layer l = net->layers[net->n - 1];
+ dn_layer l = net->layers[net->n - 1];
int i;
int nboxes = num_detections(net, thresh);
if(num) *num = nboxes;
@@ -539,11 +539,11 @@ detection *make_network_boxes(network *net, float thresh, int *num)
return dets;
}
-void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
+void fill_network_boxes(dn_network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
int j;
for(j = 0; j < net->n; ++j){
- layer l = net->layers[j];
+ dn_layer l = net->layers[j];
if(l.type == YOLO){
int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
dets += count;
@@ -559,7 +559,7 @@ void fill_network_boxes(network *net, int w, int h, float thresh, float hier, in
}
}
-detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
+detection *get_network_boxes(dn_network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
detection *dets = make_network_boxes(net, thresh, num);
fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
@@ -576,23 +576,23 @@ void free_detections(detection *dets, int n)
free(dets);
}
-float *network_predict_image(network *net, image im)
+float *network_predict_image(dn_network *net, dn_image im)
{
- image imr = letterbox_image(im, net->w, net->h);
+ dn_image imr = letterbox_image(im, net->w, net->h);
set_batch_network(net, 1);
float *p = network_predict(net, imr.data);
free_image(imr);
return p;
}
-int network_width(network *net){return net->w;}
-int network_height(network *net){return net->h;}
+int network_width(dn_network *net){return net->w;}
+int network_height(dn_network *net){return net->h;}
-matrix network_predict_data_multi(network *net, data test, int n)
+dn_matrix network_predict_data_multi(dn_network *net, dn_data test, int n)
{
int i,j,b,m;
int k = net->outputs;
- matrix pred = make_matrix(test.X.rows, k);
+ dn_matrix pred = make_matrix(test.X.rows, k);
float *X = calloc(net->batch*test.X.rows, sizeof(float));
for(i = 0; i < test.X.rows; i += net->batch){
for(b = 0; b < net->batch; ++b){
@@ -613,11 +613,11 @@ matrix network_predict_data_multi(network *net, data test, int n)
return pred;
}
-matrix network_predict_data(network *net, data test)
+dn_matrix network_predict_data(dn_network *net, dn_data test)
{
int i,j,b;
int k = net->outputs;
- matrix pred = make_matrix(test.X.rows, k);
+ dn_matrix pred = make_matrix(test.X.rows, k);
float *X = calloc(net->batch*test.X.cols, sizeof(float));
for(i = 0; i < test.X.rows; i += net->batch){
for(b = 0; b < net->batch; ++b){
@@ -636,11 +636,11 @@ matrix network_predict_data(network *net, data test)
return pred;
}
-void print_network(network *net)
+void print_network(dn_network *net)
{
int i,j;
for(i = 0; i < net->n; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
float *output = l.output;
int n = l.outputs;
float mean = mean_array(output, n);
@@ -653,10 +653,10 @@ void print_network(network *net)
}
}
-void compare_networks(network *n1, network *n2, data test)
+void compare_networks(dn_network *n1, dn_network *n2, dn_data test)
{
- matrix g1 = network_predict_data(n1, test);
- matrix g2 = network_predict_data(n2, test);
+ dn_matrix g1 = network_predict_data(n1, test);
+ dn_matrix g2 = network_predict_data(n2, test);
int i;
int a,b,c,d;
a = b = c = d = 0;
@@ -678,25 +678,25 @@ void compare_networks(network *n1, network *n2, data test)
printf("%f\n", num/den);
}
-float network_accuracy(network *net, data d)
+float network_accuracy(dn_network *net, dn_data d)
{
- matrix guess = network_predict_data(net, d);
+ dn_matrix guess = network_predict_data(net, d);
float acc = matrix_topk_accuracy(d.y, guess,1);
free_matrix(guess);
return acc;
}
-float *network_accuracies(network *net, data d, int n)
+float *network_accuracies(dn_network *net, dn_data d, int n)
{
static float acc[2];
- matrix guess = network_predict_data(net, d);
+ dn_matrix guess = network_predict_data(net, d);
acc[0] = matrix_topk_accuracy(d.y, guess, 1);
acc[1] = matrix_topk_accuracy(d.y, guess, n);
free_matrix(guess);
return acc;
}
-layer get_network_output_layer(network *net)
+dn_layer get_network_output_layer(dn_network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
@@ -705,15 +705,15 @@ layer get_network_output_layer(network *net)
return net->layers[i];
}
-float network_accuracy_multi(network *net, data d, int n)
+float network_accuracy_multi(dn_network *net, dn_data d, int n)
{
- matrix guess = network_predict_data_multi(net, d, n);
+ dn_matrix guess = network_predict_data_multi(net, d, n);
float acc = matrix_topk_accuracy(d.y, guess,1);
free_matrix(guess);
return acc;
}
-void free_network(network *net)
+void free_network(dn_network *net)
{
int i;
for(i = 0; i < net->n; ++i){
@@ -733,7 +733,7 @@ void free_network(network *net)
// ^ What the hell is this comment for?
-layer network_output_layer(network *net)
+dn_layer network_output_layer(dn_network *net)
{
int i;
for(i = net->n - 1; i >= 0; --i){
@@ -742,17 +742,17 @@ layer network_output_layer(network *net)
return net->layers[i];
}
-int network_inputs(network *net)
+int network_inputs(dn_network *net)
{
return net->layers[0].inputs;
}
-int network_outputs(network *net)
+int network_outputs(dn_network *net)
{
return network_output_layer(net).outputs;
}
-float *network_output(network *net)
+float *network_output(dn_network *net)
{
return network_output_layer(net).output;
}
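
A minimal usage sketch (not part of the patch) of how the dn_-prefixed detection API fits together after this rename. It uses only functions shown in this diff plus load_image_color, which is assumed to keep its existing name since the patch only renames types and const-qualifies path parameters; the cfg/weights/image paths are placeholders.

    #include "darknet.h"

    int main(void)
    {
        dn_network *net = parse_network_cfg("cfg/yolov3.cfg");   /* returns dn_network* after this patch */
        load_weights(net, "weights/yolov3.weights");             /* path is now const char* */
        dn_image im = load_image_color("data/dog.jpg", 0, 0);    /* assumed-unchanged helper */

        network_predict_image(net, im);                          /* takes dn_network*, dn_image */
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, .5, .5, 0, 1, &nboxes);
        /* ...non-max suppression / drawing would go here... */
        free_detections(dets, nboxes);
        free_image(im);
        free_network(net);
        return 0;
    }
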
diff --git a/src/network.h b/src/network.h
index 1b0dfd1aaa3..17fd1c4cb7f 100644
--- a/src/network.h
+++ b/src/network.h
@@ -13,17 +13,17 @@
void pull_network_output(network *net);
#endif
-void compare_networks(network *n1, network *n2, data d);
+void compare_networks(dn_network *n1, dn_network *n2, dn_data d);
char *get_layer_string(LAYER_TYPE a);
-network *make_network(int n);
+dn_network *make_network(int n);
-float network_accuracy_multi(network *net, data d, int n);
-int get_predicted_class_network(network *net);
-void print_network(network *net);
-int resize_network(network *net, int w, int h);
-void calc_network_cost(network *net);
+float network_accuracy_multi(dn_network *net, dn_data d, int n);
+int get_predicted_class_network(dn_network *net);
+void print_network(dn_network *net);
+int resize_network(dn_network *net, int w, int h);
+void calc_network_cost(dn_network *net);
#endif
diff --git a/src/normalization_layer.c b/src/normalization_layer.c
index 424714fe865..492c866572b 100644
--- a/src/normalization_layer.c
+++ b/src/normalization_layer.c
@@ -3,10 +3,10 @@
#include
-layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa)
+dn_layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa)
{
fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", w,h,c,size);
- layer layer = {0};
+ dn_layer layer = {0};
layer.type = NORMALIZATION;
layer.batch = batch;
layer.h = layer.out_h = h;
@@ -37,7 +37,7 @@ layer make_normalization_layer(int batch, int w, int h, int c, int size, float a
return layer;
}
-void resize_normalization_layer(layer *layer, int w, int h)
+void resize_normalization_layer(dn_layer *layer, int w, int h)
{
int c = layer->c;
int batch = layer->batch;
@@ -63,7 +63,7 @@ void resize_normalization_layer(layer *layer, int w, int h)
#endif
}
-void forward_normalization_layer(const layer layer, network net)
+void forward_normalization_layer(const dn_layer layer, dn_network net)
{
int k,b;
int w = layer.w;
@@ -94,7 +94,7 @@ void forward_normalization_layer(const layer layer, network net)
mul_cpu(w*h*c*layer.batch, net.input, 1, layer.output, 1);
}
-void backward_normalization_layer(const layer layer, network net)
+void backward_normalization_layer(const dn_layer layer, dn_network net)
{
// TODO This is approximate ;-)
    // Also this should add in to delta instead of overwriting.
diff --git a/src/normalization_layer.h b/src/normalization_layer.h
index 665baa50662..87cc1e6bb90 100644
--- a/src/normalization_layer.h
+++ b/src/normalization_layer.h
@@ -5,11 +5,11 @@
#include "layer.h"
#include "network.h"
-layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa);
-void resize_normalization_layer(layer *layer, int h, int w);
-void forward_normalization_layer(const layer layer, network net);
-void backward_normalization_layer(const layer layer, network net);
-void visualize_normalization_layer(layer layer, char *window);
+dn_layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa);
+void resize_normalization_layer(dn_layer *layer, int h, int w);
+void forward_normalization_layer(const dn_layer layer, dn_network net);
+void backward_normalization_layer(const dn_layer layer, dn_network net);
+void visualize_normalization_layer(dn_layer layer, char *window);
#ifdef GPU
void forward_normalization_layer_gpu(const layer layer, network net);
diff --git a/src/option_list.c b/src/option_list.c
index 2f52781f809..cc2f8dac2dd 100644
--- a/src/option_list.c
+++ b/src/option_list.c
@@ -4,13 +4,13 @@
#include "option_list.h"
#include "utils.h"
-list *read_data_cfg(char *filename)
+dn_list *read_data_cfg(const char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
- list *options = make_list();
+ dn_list *options = make_list();
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
@@ -35,7 +35,7 @@ list *read_data_cfg(char *filename)
metadata get_metadata(char *file)
{
metadata m = {0};
- list *options = read_data_cfg(file);
+ dn_list *options = read_data_cfg(file);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", 0);
@@ -49,7 +49,7 @@ metadata get_metadata(char *file)
return m;
}
-int read_option(char *s, list *options)
+int read_option(char *s, dn_list *options)
{
size_t i;
size_t len = strlen(s);
@@ -67,7 +67,7 @@ int read_option(char *s, list *options)
return 1;
}
-void option_insert(list *l, char *key, char *val)
+void option_insert(dn_list *l, char *key, char *val)
{
kvp *p = malloc(sizeof(kvp));
p->key = key;
@@ -76,9 +76,9 @@ void option_insert(list *l, char *key, char *val)
list_insert(l, p);
}
-void option_unused(list *l)
+void option_unused(dn_list *l)
{
- node *n = l->front;
+ dn_node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(!p->used){
@@ -88,9 +88,9 @@ void option_unused(list *l)
}
}
-char *option_find(list *l, char *key)
+char *option_find(dn_list *l, char *key)
{
- node *n = l->front;
+ dn_node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(strcmp(p->key, key) == 0){
@@ -101,7 +101,7 @@ char *option_find(list *l, char *key)
}
return 0;
}
-char *option_find_str(list *l, char *key, char *def)
+char *option_find_str(dn_list *l, char *key, char *def)
{
char *v = option_find(l, key);
if(v) return v;
@@ -109,7 +109,7 @@ char *option_find_str(list *l, char *key, char *def)
return def;
}
-int option_find_int(list *l, char *key, int def)
+int option_find_int(dn_list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
@@ -117,21 +117,21 @@ int option_find_int(list *l, char *key, int def)
return def;
}
-int option_find_int_quiet(list *l, char *key, int def)
+int option_find_int_quiet(dn_list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
return def;
}
-float option_find_float_quiet(list *l, char *key, float def)
+float option_find_float_quiet(dn_list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
return def;
}
-float option_find_float(list *l, char *key, float def)
+float option_find_float(dn_list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
diff --git a/src/option_list.h b/src/option_list.h
index 844bd8724b7..3bc13cadd96 100644
--- a/src/option_list.h
+++ b/src/option_list.h
@@ -9,11 +9,11 @@ typedef struct{
} kvp;
-int read_option(char *s, list *options);
-void option_insert(list *l, char *key, char *val);
-char *option_find(list *l, char *key);
-float option_find_float(list *l, char *key, float def);
-float option_find_float_quiet(list *l, char *key, float def);
-void option_unused(list *l);
+int read_option(char *s, dn_list *options);
+void option_insert(dn_list *l, char *key, char *val);
+char *option_find(dn_list *l, char *key);
+float option_find_float(dn_list *l, char *key, float def);
+float option_find_float_quiet(dn_list *l, char *key, float def);
+void option_unused(dn_list *l);
#endif
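
Since every option_* helper now takes dn_list*, here is a small sketch (not part of the patch) of reading a .data config under the new names. The path and the "names"/"classes" keys are illustrative, and the includes assume darknet.h and option_list.h declare these prototypes.

    #include <stdio.h>
    #include "darknet.h"
    #include "option_list.h"

    void print_data_cfg(const char *datacfg)
    {
        dn_list *options = read_data_cfg(datacfg);               /* now const char* */
        char *name_list = option_find_str(options, "names", "data/names.list");
        int classes = option_find_int(options, "classes", 20);
        printf("%d classes, names listed in %s\n", classes, name_list);
        option_unused(options);                                   /* report any unread keys */
    }
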
diff --git a/src/parser.c b/src/parser.c
index c8141c9f2dd..b7f0704c15a 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -39,10 +39,10 @@
typedef struct{
char *type;
- list *options;
-}section;
+ dn_list *options;
+}dn_section;
-list *read_cfg(char *filename);
+dn_list *read_cfg(const char *filename);
LAYER_TYPE string_to_layer_type(char * type)
{
@@ -86,15 +86,15 @@ LAYER_TYPE string_to_layer_type(char * type)
return BLANK;
}
-void free_section(section *s)
+void free_section(dn_section *s)
{
free(s->type);
- node *n = s->options->front;
+ dn_node *n = s->options->front;
while(n){
kvp *pair = (kvp *)n->val;
free(pair->key);
free(pair);
- node *next = n->next;
+ dn_node *next = n->next;
free(n);
n = next;
}
@@ -118,7 +118,7 @@ void parse_data(char *data, float *a, int n)
}
}
-typedef struct size_params{
+typedef struct dn_size_params{
int batch;
int inputs;
int h;
@@ -126,10 +126,10 @@ typedef struct size_params{
int c;
int index;
int time_steps;
- network *net;
-} size_params;
+ dn_network *net;
+} dn_size_params;
-local_layer parse_local(list *options, size_params params)
+local_layer parse_local(dn_list *options, dn_size_params params)
{
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
@@ -150,7 +150,7 @@ local_layer parse_local(list *options, size_params params)
return layer;
}
-layer parse_deconvolutional(list *options, size_params params)
+dn_layer parse_deconvolutional(dn_list *options, dn_size_params params)
{
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
@@ -170,13 +170,13 @@ layer parse_deconvolutional(list *options, size_params params)
int padding = option_find_int_quiet(options, "padding",0);
if(pad) padding = size/2;
- layer l = make_deconvolutional_layer(batch,h,w,c,n,size,stride,padding, activation, batch_normalize, params.net->adam);
+ dn_layer l = make_deconvolutional_layer(batch,h,w,c,n,size,stride,padding, activation, batch_normalize, params.net->adam);
return l;
}
-convolutional_layer parse_convolutional(list *options, size_params params)
+convolutional_layer parse_convolutional(dn_list *options, dn_size_params params)
{
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
@@ -206,7 +206,7 @@ convolutional_layer parse_convolutional(list *options, size_params params)
return layer;
}
-layer parse_crnn(list *options, size_params params)
+dn_layer parse_crnn(dn_list *options, dn_size_params params)
{
int output_filters = option_find_int(options, "output_filters",1);
int hidden_filters = option_find_int(options, "hidden_filters",1);
@@ -214,63 +214,63 @@ layer parse_crnn(list *options, size_params params)
ACTIVATION activation = get_activation(activation_s);
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- layer l = make_crnn_layer(params.batch, params.w, params.h, params.c, hidden_filters, output_filters, params.time_steps, activation, batch_normalize);
+ dn_layer l = make_crnn_layer(params.batch, params.w, params.h, params.c, hidden_filters, output_filters, params.time_steps, activation, batch_normalize);
l.shortcut = option_find_int_quiet(options, "shortcut", 0);
return l;
}
-layer parse_rnn(list *options, size_params params)
+dn_layer parse_rnn(dn_list *options, dn_size_params params)
{
int output = option_find_int(options, "output",1);
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- layer l = make_rnn_layer(params.batch, params.inputs, output, params.time_steps, activation, batch_normalize, params.net->adam);
+ dn_layer l = make_rnn_layer(params.batch, params.inputs, output, params.time_steps, activation, batch_normalize, params.net->adam);
l.shortcut = option_find_int_quiet(options, "shortcut", 0);
return l;
}
-layer parse_gru(list *options, size_params params)
+dn_layer parse_gru(dn_list *options, dn_size_params params)
{
int output = option_find_int(options, "output",1);
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- layer l = make_gru_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
+ dn_layer l = make_gru_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
l.tanh = option_find_int_quiet(options, "tanh", 0);
return l;
}
-layer parse_lstm(list *options, size_params params)
+dn_layer parse_lstm(dn_list *options, dn_size_params params)
{
int output = option_find_int(options, "output", 1);
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- layer l = make_lstm_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
+ dn_layer l = make_lstm_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
return l;
}
-layer parse_connected(list *options, size_params params)
+dn_layer parse_connected(dn_list *options, dn_size_params params)
{
int output = option_find_int(options, "output",1);
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- layer l = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize, params.net->adam);
+ dn_layer l = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize, params.net->adam);
return l;
}
-layer parse_softmax(list *options, size_params params)
+dn_layer parse_softmax(dn_list *options, dn_size_params params)
{
int groups = option_find_int_quiet(options, "groups",1);
- layer l = make_softmax_layer(params.batch, params.inputs, groups);
+ dn_layer l = make_softmax_layer(params.batch, params.inputs, groups);
l.temperature = option_find_float_quiet(options, "temperature", 1);
char *tree_file = option_find_str(options, "tree", 0);
if (tree_file) l.softmax_tree = read_tree(tree_file);
@@ -303,7 +303,7 @@ int *parse_yolo_mask(char *a, int *num)
return mask;
}
-layer parse_yolo(list *options, size_params params)
+dn_layer parse_yolo(dn_list *options, dn_size_params params)
{
int classes = option_find_int(options, "classes", 20);
int total = option_find_int(options, "num", 1);
@@ -311,7 +311,7 @@ layer parse_yolo(list *options, size_params params)
char *a = option_find_str(options, "mask", 0);
int *mask = parse_yolo_mask(a, &num);
- layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
+ dn_layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
assert(l.outputs == params.inputs);
l.max_boxes = option_find_int_quiet(options, "max",90);
@@ -341,22 +341,22 @@ layer parse_yolo(list *options, size_params params)
return l;
}
-layer parse_iseg(list *options, size_params params)
+dn_layer parse_iseg(dn_list *options, dn_size_params params)
{
int classes = option_find_int(options, "classes", 20);
int ids = option_find_int(options, "ids", 32);
- layer l = make_iseg_layer(params.batch, params.w, params.h, classes, ids);
+ dn_layer l = make_iseg_layer(params.batch, params.w, params.h, classes, ids);
assert(l.outputs == params.inputs);
return l;
}
-layer parse_region(list *options, size_params params)
+dn_layer parse_region(dn_list *options, dn_size_params params)
{
int coords = option_find_int(options, "coords", 4);
int classes = option_find_int(options, "classes", 20);
int num = option_find_int(options, "num", 1);
- layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
+ dn_layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
assert(l.outputs == params.inputs);
l.log = option_find_int_quiet(options, "log", 0);
@@ -402,7 +402,7 @@ layer parse_region(list *options, size_params params)
return l;
}
-detection_layer parse_detection(list *options, size_params params)
+detection_layer parse_detection(dn_list *options, dn_size_params params)
{
int coords = option_find_int(options, "coords", 1);
int classes = option_find_int(options, "classes", 1);
@@ -426,7 +426,7 @@ detection_layer parse_detection(list *options, size_params params)
return layer;
}
-cost_layer parse_cost(list *options, size_params params)
+cost_layer parse_cost(dn_list *options, dn_size_params params)
{
char *type_s = option_find_str(options, "type", "sse");
COST_TYPE type = get_cost_type(type_s);
@@ -438,7 +438,7 @@ cost_layer parse_cost(list *options, size_params params)
return layer;
}
-crop_layer parse_crop(list *options, size_params params)
+crop_layer parse_crop(dn_list *options, dn_size_params params)
{
int crop_height = option_find_int(options, "crop_height",1);
int crop_width = option_find_int(options, "crop_width",1);
@@ -462,7 +462,7 @@ crop_layer parse_crop(list *options, size_params params)
return l;
}
-layer parse_reorg(list *options, size_params params)
+dn_layer parse_reorg(dn_list *options, dn_size_params params)
{
int stride = option_find_int(options, "stride",1);
int reverse = option_find_int_quiet(options, "reverse",0);
@@ -476,11 +476,11 @@ layer parse_reorg(list *options, size_params params)
batch=params.batch;
if(!(h && w && c)) error("Layer before reorg layer must output image.");
- layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
+ dn_layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
return layer;
}
-maxpool_layer parse_maxpool(list *options, size_params params)
+maxpool_layer parse_maxpool(dn_list *options, dn_size_params params)
{
int stride = option_find_int(options, "stride",1);
int size = option_find_int(options, "size",stride);
@@ -497,7 +497,7 @@ maxpool_layer parse_maxpool(list *options, size_params params)
return layer;
}
-avgpool_layer parse_avgpool(list *options, size_params params)
+avgpool_layer parse_avgpool(dn_list *options, dn_size_params params)
{
int batch,w,h,c;
w = params.w;
@@ -510,7 +510,7 @@ avgpool_layer parse_avgpool(list *options, size_params params)
return layer;
}
-dropout_layer parse_dropout(list *options, size_params params)
+dropout_layer parse_dropout(dn_list *options, dn_size_params params)
{
float probability = option_find_float(options, "probability", .5);
dropout_layer layer = make_dropout_layer(params.batch, params.inputs, probability);
@@ -520,32 +520,32 @@ dropout_layer parse_dropout(list *options, size_params params)
return layer;
}
-layer parse_normalization(list *options, size_params params)
+dn_layer parse_normalization(dn_list *options, dn_size_params params)
{
float alpha = option_find_float(options, "alpha", .0001);
float beta = option_find_float(options, "beta" , .75);
float kappa = option_find_float(options, "kappa", 1);
int size = option_find_int(options, "size", 5);
- layer l = make_normalization_layer(params.batch, params.w, params.h, params.c, size, alpha, beta, kappa);
+ dn_layer l = make_normalization_layer(params.batch, params.w, params.h, params.c, size, alpha, beta, kappa);
return l;
}
-layer parse_batchnorm(list *options, size_params params)
+dn_layer parse_batchnorm(dn_list *options, dn_size_params params)
{
- layer l = make_batchnorm_layer(params.batch, params.w, params.h, params.c);
+ dn_layer l = make_batchnorm_layer(params.batch, params.w, params.h, params.c);
return l;
}
-layer parse_shortcut(list *options, size_params params, network *net)
+dn_layer parse_shortcut(dn_list *options, dn_size_params params, dn_network *net)
{
char *l = option_find(options, "from");
int index = atoi(l);
if(index < 0) index = params.index + index;
int batch = params.batch;
- layer from = net->layers[index];
+ dn_layer from = net->layers[index];
- layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
+ dn_layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
char *activation_s = option_find_str(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
@@ -556,9 +556,9 @@ layer parse_shortcut(list *options, size_params params, network *net)
}
-layer parse_l2norm(list *options, size_params params)
+dn_layer parse_l2norm(dn_list *options, dn_size_params params)
{
- layer l = make_l2norm_layer(params.batch, params.inputs);
+ dn_layer l = make_l2norm_layer(params.batch, params.inputs);
l.h = l.out_h = params.h;
l.w = l.out_w = params.w;
l.c = l.out_c = params.c;
@@ -566,21 +566,21 @@ layer parse_l2norm(list *options, size_params params)
}
-layer parse_logistic(list *options, size_params params)
+dn_layer parse_logistic(dn_list *options, dn_size_params params)
{
- layer l = make_logistic_layer(params.batch, params.inputs);
+ dn_layer l = make_logistic_layer(params.batch, params.inputs);
l.h = l.out_h = params.h;
l.w = l.out_w = params.w;
l.c = l.out_c = params.c;
return l;
}
-layer parse_activation(list *options, size_params params)
+dn_layer parse_activation(dn_list *options, dn_size_params params)
{
char *activation_s = option_find_str(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
- layer l = make_activation_layer(params.batch, params.inputs, activation);
+ dn_layer l = make_activation_layer(params.batch, params.inputs, activation);
l.h = l.out_h = params.h;
l.w = l.out_w = params.w;
@@ -589,16 +589,16 @@ layer parse_activation(list *options, size_params params)
return l;
}
-layer parse_upsample(list *options, size_params params, network *net)
+dn_layer parse_upsample(dn_list *options, dn_size_params params, dn_network *net)
{
int stride = option_find_int(options, "stride",2);
- layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
+ dn_layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
l.scale = option_find_float_quiet(options, "scale", 1);
return l;
}
-route_layer parse_route(list *options, size_params params, network *net)
+route_layer parse_route(dn_list *options, dn_size_params params, dn_network *net)
{
char *l = option_find(options, "layers");
int len = strlen(l);
@@ -652,7 +652,7 @@ learning_rate_policy get_policy(char *s)
return CONSTANT;
}
-void parse_net_options(list *options, network *net)
+void parse_net_options(dn_list *options, dn_network *net)
{
net->batch = option_find_int(options, "batch",1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
@@ -733,23 +733,23 @@ void parse_net_options(list *options, network *net)
net->max_batches = option_find_int(options, "max_batches", 0);
}
-int is_network(section *s)
+int is_network(dn_section *s)
{
return (strcmp(s->type, "[net]")==0
|| strcmp(s->type, "[network]")==0);
}
-network *parse_network_cfg(char *filename)
+dn_network *parse_network_cfg(const char *filename)
{
- list *sections = read_cfg(filename);
- node *n = sections->front;
+ dn_list *sections = read_cfg(filename);
+ dn_node *n = sections->front;
if(!n) error("Config file has no sections");
- network *net = make_network(sections->size - 1);
+ dn_network *net = make_network(sections->size - 1);
net->gpu_index = gpu_index;
- size_params params;
+ dn_size_params params;
- section *s = (section *)n->val;
- list *options = s->options;
+ dn_section *s = (dn_section *)n->val;
+ dn_list *options = s->options;
if(!is_network(s)) error("First section must be [net] or [network]");
parse_net_options(options, net);
@@ -769,9 +769,9 @@ network *parse_network_cfg(char *filename)
while(n){
params.index = count;
fprintf(stderr, "%5d ", count);
- s = (section *)n->val;
+ s = (dn_section *)n->val;
options = s->options;
- layer l = {0};
+ dn_layer l = {0};
LAYER_TYPE lt = string_to_layer_type(s->type);
if(lt == CONVOLUTIONAL){
l = parse_convolutional(options, params);
@@ -861,7 +861,7 @@ network *parse_network_cfg(char *filename)
}
}
free_list(sections);
- layer out = get_network_output_layer(net);
+ dn_layer out = get_network_output_layer(net);
net->outputs = out.outputs;
net->truths = out.outputs;
if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
@@ -888,20 +888,20 @@ network *parse_network_cfg(char *filename)
return net;
}
-list *read_cfg(char *filename)
+dn_list *read_cfg(const char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
- list *options = make_list();
- section *current = 0;
+ dn_list *options = make_list();
+ dn_section *current = 0;
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '[':
- current = malloc(sizeof(section));
+ current = malloc(sizeof(dn_section));
list_insert(options, current);
current->options = make_list();
current->type = line;
@@ -923,7 +923,7 @@ list *read_cfg(char *filename)
return options;
}
-void save_convolutional_weights_binary(layer l, FILE *fp)
+void save_convolutional_weights_binary(dn_layer l, FILE *fp)
{
#ifdef GPU
if(gpu_index >= 0){
@@ -955,7 +955,7 @@ void save_convolutional_weights_binary(layer l, FILE *fp)
}
}
-void save_convolutional_weights(layer l, FILE *fp)
+void save_convolutional_weights(dn_layer l, FILE *fp)
{
if(l.binary){
//save_convolutional_weights_binary(l, fp);
@@ -976,7 +976,7 @@ void save_convolutional_weights(layer l, FILE *fp)
fwrite(l.weights, sizeof(float), num, fp);
}
-void save_batchnorm_weights(layer l, FILE *fp)
+void save_batchnorm_weights(dn_layer l, FILE *fp)
{
#ifdef GPU
if(gpu_index >= 0){
@@ -988,7 +988,7 @@ void save_batchnorm_weights(layer l, FILE *fp)
fwrite(l.rolling_variance, sizeof(float), l.c, fp);
}
-void save_connected_weights(layer l, FILE *fp)
+void save_connected_weights(dn_layer l, FILE *fp)
{
#ifdef GPU
if(gpu_index >= 0){
@@ -1004,7 +1004,7 @@ void save_connected_weights(layer l, FILE *fp)
}
}
-void save_weights_upto(network *net, char *filename, int cutoff)
+void save_weights_upto(dn_network *net, const char *filename, int cutoff)
{
#ifdef GPU
if(net->gpu_index >= 0){
@@ -1025,7 +1025,7 @@ void save_weights_upto(network *net, char *filename, int cutoff)
int i;
for(i = 0; i < net->n && i < cutoff; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if (l.dontsave) continue;
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
save_convolutional_weights(l, fp);
@@ -1077,7 +1077,7 @@ void save_weights_upto(network *net, char *filename, int cutoff)
}
fclose(fp);
}
-void save_weights(network *net, char *filename)
+void save_weights(dn_network *net, const char *filename)
{
save_weights_upto(net, filename, net->n);
}
@@ -1095,7 +1095,7 @@ void transpose_matrix(float *a, int rows, int cols)
free(transpose);
}
-void load_connected_weights(layer l, FILE *fp, int transpose)
+void load_connected_weights(dn_layer l, FILE *fp, int transpose)
{
fread(l.biases, sizeof(float), l.outputs, fp);
fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
@@ -1119,7 +1119,7 @@ void load_connected_weights(layer l, FILE *fp, int transpose)
#endif
}
-void load_batchnorm_weights(layer l, FILE *fp)
+void load_batchnorm_weights(dn_layer l, FILE *fp)
{
fread(l.scales, sizeof(float), l.c, fp);
fread(l.rolling_mean, sizeof(float), l.c, fp);
@@ -1131,7 +1131,7 @@ void load_batchnorm_weights(layer l, FILE *fp)
#endif
}
-void load_convolutional_weights_binary(layer l, FILE *fp)
+void load_convolutional_weights_binary(dn_layer l, FILE *fp)
{
fread(l.biases, sizeof(float), l.n, fp);
if (l.batch_normalize && (!l.dontloadscales)){
@@ -1161,7 +1161,7 @@ void load_convolutional_weights_binary(layer l, FILE *fp)
#endif
}
-void load_convolutional_weights(layer l, FILE *fp)
+void load_convolutional_weights(dn_layer l, FILE *fp)
{
if(l.binary){
//load_convolutional_weights_binary(l, fp);
@@ -1215,7 +1215,7 @@ void load_convolutional_weights(layer l, FILE *fp)
}
-void load_weights_upto(network *net, char *filename, int start, int cutoff)
+void load_weights_upto(dn_network *net, const char *filename, int start, int cutoff)
{
#ifdef GPU
if(net->gpu_index >= 0){
@@ -1244,7 +1244,7 @@ void load_weights_upto(network *net, char *filename, int start, int cutoff)
int i;
for(i = start; i < net->n && i < cutoff; ++i){
- layer l = net->layers[i];
+ dn_layer l = net->layers[i];
if (l.dontload) continue;
if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
load_convolutional_weights(l, fp);
@@ -1305,7 +1305,7 @@ void load_weights_upto(network *net, char *filename, int start, int cutoff)
fclose(fp);
}
-void load_weights(network *net, char *filename)
+void load_weights(dn_network *net, const char *filename)
{
load_weights_upto(net, filename, 0, net->n);
}
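
parse_network_cfg, load_weights and save_weights_upto now take const char* paths. A short sketch (not part of the patch) of the load/truncate/save round trip, in the spirit of darknet's partial command; the file names and the cutoff value are placeholders.

    #include "darknet.h"

    void write_partial_weights(void)
    {
        dn_network *net = parse_network_cfg("cfg/yolov3.cfg");
        load_weights(net, "weights/yolov3.weights");
        save_weights_upto(net, "weights/yolov3.conv.81", 81);     /* keep only the first 81 layers */
        free_network(net);
    }
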
diff --git a/src/parser.h b/src/parser.h
index 81aef2c86f3..ee393f7a0f0 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -3,7 +3,7 @@
#include "darknet.h"
#include "network.h"
-void save_network(network net, char *filename);
-void save_weights_double(network net, char *filename);
+void save_network(dn_network net, const char *filename);
+void save_weights_double(dn_network net, const char *filename);
#endif
diff --git a/src/region_layer.c b/src/region_layer.c
index 179f5e32a60..cc7e2fbb804 100644
--- a/src/region_layer.c
+++ b/src/region_layer.c
@@ -10,9 +10,9 @@
#include
#include
-layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
+dn_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
- layer l = {0};
+ dn_layer l = {0};
l.type = REGION;
l.n = n;
@@ -53,7 +53,7 @@ layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
return l;
}
-void resize_region_layer(layer *l, int w, int h)
+void resize_region_layer(dn_layer *l, int w, int h)
{
l->w = w;
l->h = h;
@@ -73,9 +73,9 @@ void resize_region_layer(layer *l, int w, int h)
#endif
}
-box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
+dn_box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
- box b;
+ dn_box b;
b.x = (i + x[index + 0*stride]) / w;
b.y = (j + x[index + 1*stride]) / h;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
@@ -83,9 +83,9 @@ box get_region_box(float *x, float *biases, int n, int index, int i, int j, int
return b;
}
-float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale, int stride)
+float delta_region_box(dn_box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale, int stride)
{
- box pred = get_region_box(x, biases, n, index, i, j, w, h, stride);
+ dn_box pred = get_region_box(x, biases, n, index, i, j, w, h, stride);
float iou = box_iou(pred, truth);
float tx = (truth.x*w - i);
@@ -109,7 +109,7 @@ void delta_region_mask(float *truth, float *x, int n, int index, float *delta, i
}
-void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, int stride, float *avg_cat, int tag)
+void delta_region_class(float *output, float *delta, int index, int class, int classes, dn_tree *hier, float scale, int stride, float *avg_cat, int tag)
{
int i, n;
if(hier){
@@ -148,14 +148,14 @@ float tisnan(float x)
return (x != x);
}
-int entry_index(layer l, int batch, int location, int entry)
+int entry_index(dn_layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(l.coords+l.classes+1) + entry*l.w*l.h + loc;
}
-void forward_region_layer(const layer l, network net)
+void forward_region_layer(const dn_layer l, dn_network net)
{
int i,j,b,t,n;
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
@@ -199,7 +199,7 @@ void forward_region_layer(const layer l, network net)
if(l.softmax_tree){
int onlyclass = 0;
for(t = 0; t < 30; ++t){
- box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
float maxp = 0;
@@ -233,10 +233,10 @@ void forward_region_layer(const layer l, network net)
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
- box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
+ dn_box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
float best_iou = 0;
for(t = 0; t < 30; ++t){
- box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
@@ -252,7 +252,7 @@ void forward_region_layer(const layer l, network net)
}
if(*(net.seen) < 12800){
- box truth = {0};
+ dn_box truth = {0};
truth.x = (i + .5)/l.w;
truth.y = (j + .5)/l.h;
truth.w = l.biases[2*n]/l.w;
@@ -263,19 +263,19 @@ void forward_region_layer(const layer l, network net)
}
}
for(t = 0; t < 30; ++t){
- box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
float best_iou = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
- box truth_shift = truth;
+ dn_box truth_shift = truth;
truth_shift.x = 0;
truth_shift.y = 0;
for(n = 0; n < l.n; ++n){
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
- box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
+ dn_box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
if(l.bias_match){
pred.w = l.biases[2*n]/l.w;
pred.h = l.biases[2*n+1]/l.h;
@@ -320,7 +320,7 @@ void forward_region_layer(const layer l, network net)
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
-void backward_region_layer(const layer l, network net)
+void backward_region_layer(const dn_layer l, dn_network net)
{
/*
int b;
@@ -346,7 +346,7 @@ void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int ne
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
- box b = dets[i].bbox;
+ dn_box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
@@ -361,7 +361,7 @@ void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int ne
}
}
-void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
+void get_region_detections(dn_layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i,j,n,z;
float *predictions = l.output;
@@ -494,7 +494,7 @@ void backward_region_layer_gpu(const layer l, network net)
}
#endif
-void zero_objectness(layer l)
+void zero_objectness(dn_layer l)
{
int i, n;
for (i = 0; i < l.w*l.h; ++i){
diff --git a/src/region_layer.h b/src/region_layer.h
index 9f12fd187fd..89ec5ca5da7 100644
--- a/src/region_layer.h
+++ b/src/region_layer.h
@@ -5,10 +5,10 @@
#include "layer.h"
#include "network.h"
-layer make_region_layer(int batch, int w, int h, int n, int classes, int coords);
-void forward_region_layer(const layer l, network net);
-void backward_region_layer(const layer l, network net);
-void resize_region_layer(layer *l, int w, int h);
+dn_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords);
+void forward_region_layer(const dn_layer l, dn_network net);
+void backward_region_layer(const dn_layer l, dn_network net);
+void resize_region_layer(dn_layer *l, int w, int h);
#ifdef GPU
void forward_region_layer_gpu(const layer l, network net);
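
box becomes dn_box but keeps its center-format x/y/w/h fields, and box_iou itself is untouched by this patch. A tiny sketch (coordinates are arbitrary) of building two boxes and comparing them:

    #include "darknet.h"

    float example_iou(void)
    {
        dn_box a = { .x = 0.50f, .y = 0.50f, .w = 0.20f, .h = 0.20f };
        dn_box b = { .x = 0.55f, .y = 0.50f, .w = 0.20f, .h = 0.20f };
        return box_iou(a, b);    /* ~0.6 for this overlap */
    }
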
diff --git a/src/reorg_layer.c b/src/reorg_layer.c
index 31d6b843676..5ec39286438 100644
--- a/src/reorg_layer.c
+++ b/src/reorg_layer.c
@@ -5,9 +5,9 @@
#include
-layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
+dn_layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
- layer l = {0};
+ dn_layer l = {0};
l.type = REORG;
l.batch = batch;
l.stride = stride;
@@ -55,7 +55,7 @@ layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse,
return l;
}
-void resize_reorg_layer(layer *l, int w, int h)
+void resize_reorg_layer(dn_layer *l, int w, int h)
{
int stride = l->stride;
int c = l->c;
@@ -88,7 +88,7 @@ void resize_reorg_layer(layer *l, int w, int h)
#endif
}
-void forward_reorg_layer(const layer l, network net)
+void forward_reorg_layer(const dn_layer l, dn_network net)
{
int i;
if(l.flatten){
@@ -109,7 +109,7 @@ void forward_reorg_layer(const layer l, network net)
}
}
-void backward_reorg_layer(const layer l, network net)
+void backward_reorg_layer(const dn_layer l, dn_network net)
{
int i;
if(l.flatten){
diff --git a/src/reorg_layer.h b/src/reorg_layer.h
index e6513a5f441..21a756adf46 100644
--- a/src/reorg_layer.h
+++ b/src/reorg_layer.h
@@ -6,10 +6,10 @@
#include "layer.h"
#include "network.h"
-layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra);
-void resize_reorg_layer(layer *l, int w, int h);
-void forward_reorg_layer(const layer l, network net);
-void backward_reorg_layer(const layer l, network net);
+dn_layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra);
+void resize_reorg_layer(dn_layer *l, int w, int h);
+void forward_reorg_layer(const dn_layer l, dn_network net);
+void backward_reorg_layer(const dn_layer l, dn_network net);
#ifdef GPU
void forward_reorg_layer_gpu(layer l, network net);
diff --git a/src/rnn_layer.c b/src/rnn_layer.c
index 8c9b457e26e..f75c1eea2f9 100644
--- a/src/rnn_layer.c
+++ b/src/rnn_layer.c
@@ -10,7 +10,7 @@
#include
#include
-static void increment_layer(layer *l, int steps)
+static void increment_layer(dn_layer *l, int steps)
{
int num = l->outputs*l->batch*steps;
l->output += num;
@@ -26,11 +26,11 @@ static void increment_layer(layer *l, int steps)
#endif
}
-layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam)
+dn_layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam)
{
fprintf(stderr, "RNN Layer: %d inputs, %d outputs\n", inputs, outputs);
batch = batch / steps;
- layer l = {0};
+ dn_layer l = {0};
l.batch = batch;
l.type = RNN;
l.steps = steps;
@@ -39,17 +39,17 @@ layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION a
l.state = calloc(batch*outputs, sizeof(float));
l.prev_state = calloc(batch*outputs, sizeof(float));
- l.input_layer = malloc(sizeof(layer));
+ l.input_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.input_layer) = make_connected_layer(batch*steps, inputs, outputs, activation, batch_normalize, adam);
l.input_layer->batch = batch;
- l.self_layer = malloc(sizeof(layer));
+ l.self_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.self_layer) = make_connected_layer(batch*steps, outputs, outputs, activation, batch_normalize, adam);
l.self_layer->batch = batch;
- l.output_layer = malloc(sizeof(layer));
+ l.output_layer = malloc(sizeof(dn_layer));
fprintf(stderr, "\t\t");
*(l.output_layer) = make_connected_layer(batch*steps, outputs, outputs, activation, batch_normalize, adam);
l.output_layer->batch = batch;
@@ -79,21 +79,21 @@ layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION a
return l;
}
-void update_rnn_layer(layer l, update_args a)
+void update_rnn_layer(dn_layer l, update_args a)
{
update_connected_layer(*(l.input_layer), a);
update_connected_layer(*(l.self_layer), a);
update_connected_layer(*(l.output_layer), a);
}
-void forward_rnn_layer(layer l, network net)
+void forward_rnn_layer(dn_layer l, dn_network net)
{
- network s = net;
+ dn_network s = net;
s.train = net.train;
int i;
- layer input_layer = *(l.input_layer);
- layer self_layer = *(l.self_layer);
- layer output_layer = *(l.output_layer);
+ dn_layer input_layer = *(l.input_layer);
+ dn_layer self_layer = *(l.self_layer);
+ dn_layer output_layer = *(l.output_layer);
fill_cpu(l.outputs * l.batch * l.steps, 0, output_layer.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, self_layer.delta, 1);
@@ -127,14 +127,14 @@ void forward_rnn_layer(layer l, network net)
}
}
-void backward_rnn_layer(layer l, network net)
+void backward_rnn_layer(dn_layer l, dn_network net)
{
- network s = net;
+ dn_network s = net;
s.train = net.train;
int i;
- layer input_layer = *(l.input_layer);
- layer self_layer = *(l.self_layer);
- layer output_layer = *(l.output_layer);
+ dn_layer input_layer = *(l.input_layer);
+ dn_layer self_layer = *(l.self_layer);
+ dn_layer output_layer = *(l.output_layer);
increment_layer(&input_layer, l.steps-1);
increment_layer(&self_layer, l.steps-1);
diff --git a/src/rnn_layer.h b/src/rnn_layer.h
index 270a63ffafc..ce87e100954 100644
--- a/src/rnn_layer.h
+++ b/src/rnn_layer.h
@@ -7,11 +7,11 @@
#include "network.h"
#define USET
-layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam);
+dn_layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam);
-void forward_rnn_layer(layer l, network net);
-void backward_rnn_layer(layer l, network net);
-void update_rnn_layer(layer l, update_args a);
+void forward_rnn_layer(dn_layer l, dn_network net);
+void backward_rnn_layer(dn_layer l, dn_network net);
+void update_rnn_layer(dn_layer l, update_args a);
#ifdef GPU
void forward_rnn_layer_gpu(layer l, network net);
diff --git a/src/route_layer.c b/src/route_layer.c
index a8970a46001..f89ea5a7a2d 100644
--- a/src/route_layer.c
+++ b/src/route_layer.c
@@ -37,10 +37,10 @@ route_layer make_route_layer(int batch, int n, int *input_layers, int *input_siz
return l;
}
-void resize_route_layer(route_layer *l, network *net)
+void resize_route_layer(route_layer *l, dn_network *net)
{
int i;
- layer first = net->layers[l->input_layers[0]];
+ dn_layer first = net->layers[l->input_layers[0]];
l->out_w = first.out_w;
l->out_h = first.out_h;
l->out_c = first.out_c;
@@ -48,7 +48,7 @@ void resize_route_layer(route_layer *l, network *net)
l->input_sizes[0] = first.outputs;
for(i = 1; i < l->n; ++i){
int index = l->input_layers[i];
- layer next = net->layers[index];
+ dn_layer next = net->layers[index];
l->outputs += next.outputs;
l->input_sizes[i] = next.outputs;
if(next.out_w == first.out_w && next.out_h == first.out_h){
@@ -71,7 +71,7 @@ void resize_route_layer(route_layer *l, network *net)
}
-void forward_route_layer(const route_layer l, network net)
+void forward_route_layer(const route_layer l, dn_network net)
{
int i, j;
int offset = 0;
@@ -86,7 +86,7 @@ void forward_route_layer(const route_layer l, network net)
}
}
-void backward_route_layer(const route_layer l, network net)
+void backward_route_layer(const route_layer l, dn_network net)
{
int i, j;
int offset = 0;
diff --git a/src/route_layer.h b/src/route_layer.h
index 1d40330ff30..07a94ed5c1e 100644
--- a/src/route_layer.h
+++ b/src/route_layer.h
@@ -3,12 +3,12 @@
#include "network.h"
#include "layer.h"
-typedef layer route_layer;
+typedef dn_layer route_layer;
route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size);
-void forward_route_layer(const route_layer l, network net);
-void backward_route_layer(const route_layer l, network net);
-void resize_route_layer(route_layer *l, network *net);
+void forward_route_layer(const route_layer l, dn_network net);
+void backward_route_layer(const route_layer l, dn_network net);
+void resize_route_layer(route_layer *l, dn_network *net);
#ifdef GPU
void forward_route_layer_gpu(const route_layer l, network net);
diff --git a/src/shortcut_layer.c b/src/shortcut_layer.c
index 49d17f56f66..e6186776be9 100644
--- a/src/shortcut_layer.c
+++ b/src/shortcut_layer.c
@@ -6,10 +6,10 @@
#include
#include
-layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
+dn_layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
- layer l = {0};
+ dn_layer l = {0};
l.type = SHORTCUT;
l.batch = batch;
l.w = w2;
@@ -38,7 +38,7 @@ layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int
return l;
}
-void resize_shortcut_layer(layer *l, int w, int h)
+void resize_shortcut_layer(dn_layer *l, int w, int h)
{
assert(l->w == l->out_w);
assert(l->h == l->out_h);
@@ -59,14 +59,14 @@ void resize_shortcut_layer(layer *l, int w, int h)
}
-void forward_shortcut_layer(const layer l, network net)
+void forward_shortcut_layer(const dn_layer l, dn_network net)
{
copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
activate_array(l.output, l.outputs*l.batch, l.activation);
}
-void backward_shortcut_layer(const layer l, network net)
+void backward_shortcut_layer(const dn_layer l, dn_network net)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
axpy_cpu(l.outputs*l.batch, l.alpha, l.delta, 1, net.delta, 1);
diff --git a/src/shortcut_layer.h b/src/shortcut_layer.h
index 5f684fc1ead..cf4054b0262 100644
--- a/src/shortcut_layer.h
+++ b/src/shortcut_layer.h
@@ -4,10 +4,10 @@
#include "layer.h"
#include "network.h"
-layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
-void forward_shortcut_layer(const layer l, network net);
-void backward_shortcut_layer(const layer l, network net);
-void resize_shortcut_layer(layer *l, int w, int h);
+dn_layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
+void forward_shortcut_layer(const dn_layer l, dn_network net);
+void backward_shortcut_layer(const dn_layer l, dn_network net);
+void resize_shortcut_layer(dn_layer *l, int w, int h);
#ifdef GPU
void forward_shortcut_layer_gpu(const layer l, network net);
diff --git a/src/softmax_layer.c b/src/softmax_layer.c
index 9cbc6be120d..cb18e407238 100644
--- a/src/softmax_layer.c
+++ b/src/softmax_layer.c
@@ -36,7 +36,7 @@ softmax_layer make_softmax_layer(int batch, int inputs, int groups)
return l;
}
-void forward_softmax_layer(const softmax_layer l, network net)
+void forward_softmax_layer(const softmax_layer l, dn_network net)
{
if(l.softmax_tree){
int i;
@@ -56,7 +56,7 @@ void forward_softmax_layer(const softmax_layer l, network net)
}
}
-void backward_softmax_layer(const softmax_layer l, network net)
+void backward_softmax_layer(const softmax_layer l, dn_network net)
{
axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
}
diff --git a/src/softmax_layer.h b/src/softmax_layer.h
index 2e3ffe01a6c..7e4a4a7f495 100644
--- a/src/softmax_layer.h
+++ b/src/softmax_layer.h
@@ -3,12 +3,12 @@
#include "layer.h"
#include "network.h"
-typedef layer softmax_layer;
+typedef dn_layer softmax_layer;
void softmax_array(float *input, int n, float temp, float *output);
softmax_layer make_softmax_layer(int batch, int inputs, int groups);
-void forward_softmax_layer(const softmax_layer l, network net);
-void backward_softmax_layer(const softmax_layer l, network net);
+void forward_softmax_layer(const softmax_layer l, dn_network net);
+void backward_softmax_layer(const softmax_layer l, dn_network net);
#ifdef GPU
void pull_softmax_layer_output(const softmax_layer l);
diff --git a/src/tree.c b/src/tree.c
index 67b6d431f6f..2ee87a48b90 100644
--- a/src/tree.c
+++ b/src/tree.c
@@ -4,9 +4,9 @@
#include "utils.h"
#include "data.h"
-void change_leaves(tree *t, char *leaf_list)
+void change_leaves(dn_tree *t, char *leaf_list)
{
- list *llist = get_paths(leaf_list);
+ dn_list *llist = get_paths(leaf_list);
char **leaves = (char **)list_to_array(llist);
int n = llist->size;
int i,j;
@@ -24,7 +24,7 @@ void change_leaves(tree *t, char *leaf_list)
fprintf(stderr, "Found %d leaves.\n", found);
}
-float get_hierarchy_probability(float *x, tree *hier, int c, int stride)
+float get_hierarchy_probability(float *x, dn_tree *hier, int c, int stride)
{
float p = 1;
while(c >= 0){
@@ -34,7 +34,7 @@ float get_hierarchy_probability(float *x, tree *hier, int c, int stride)
return p;
}
-void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride)
+void hierarchy_predictions(float *predictions, int n, dn_tree *hier, int only_leaves, int stride)
{
int j;
for(j = 0; j < n; ++j){
@@ -50,7 +50,7 @@ void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leave
}
}
-int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride)
+int hierarchy_top_prediction(float *predictions, dn_tree *hier, float thresh, int stride)
{
float p = 1;
int group = 0;
@@ -80,9 +80,9 @@ int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int s
return 0;
}
-tree *read_tree(char *filename)
+dn_tree *read_tree(const char *filename)
{
- tree t = {0};
+ dn_tree t = {0};
FILE *fp = fopen(filename, "r");
char *line;
@@ -132,7 +132,7 @@ tree *read_tree(char *filename)
for(i = 0; i < n; ++i) if(t.parent[i] >= 0) t.leaf[t.parent[i]] = 0;
fclose(fp);
- tree *tree_ptr = calloc(1, sizeof(tree));
+ dn_tree *tree_ptr = calloc(1, sizeof(dn_tree));
*tree_ptr = t;
//error(0);
return tree_ptr;
diff --git a/src/tree.h b/src/tree.h
index 3802b8ead80..f215ad66056 100644
--- a/src/tree.h
+++ b/src/tree.h
@@ -2,7 +2,7 @@
#define TREE_H
#include "darknet.h"
-int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride);
-float get_hierarchy_probability(float *x, tree *hier, int c, int stride);
+int hierarchy_top_prediction(float *predictions, dn_tree *hier, float thresh, int stride);
+float get_hierarchy_probability(float *x, dn_tree *hier, int c, int stride);
#endif
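
read_tree now takes a const char* path. A brief sketch (not part of the patch) of loading a word-tree and post-processing a prediction vector with the hierarchy helpers; the tree path is a placeholder, the predictions buffer is assumed to come from a softmax-tree layer's output, and the includes assume these prototypes live in darknet.h and tree.h.

    #include <stdio.h>
    #include "darknet.h"
    #include "tree.h"

    void classify_with_tree(float *predictions, int n)
    {
        dn_tree *hier = read_tree("data/9k.tree");                /* now const char* */
        hierarchy_predictions(predictions, n, hier, 1, 1);         /* multiply conditionals down the tree, keep leaves only */
        int best = hierarchy_top_prediction(predictions, hier, .5, 1);
        printf("predicted node %d\n", best);                       /* greedy descent, stops below the .5 threshold */
    }
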
diff --git a/src/upsample_layer.c b/src/upsample_layer.c
index 605f21f8ebd..a49fa7abf1a 100644
--- a/src/upsample_layer.c
+++ b/src/upsample_layer.c
@@ -4,9 +4,9 @@
#include
-layer make_upsample_layer(int batch, int w, int h, int c, int stride)
+dn_layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
- layer l = {0};
+ dn_layer l = {0};
l.type = UPSAMPLE;
l.batch = batch;
l.w = w;
@@ -41,7 +41,7 @@ layer make_upsample_layer(int batch, int w, int h, int c, int stride)
return l;
}
-void resize_upsample_layer(layer *l, int w, int h)
+void resize_upsample_layer(dn_layer *l, int w, int h)
{
l->w = w;
l->h = h;
@@ -65,7 +65,7 @@ void resize_upsample_layer(layer *l, int w, int h)
}
-void forward_upsample_layer(const layer l, network net)
+void forward_upsample_layer(const dn_layer l, dn_network net)
{
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
if(l.reverse){
@@ -75,7 +75,7 @@ void forward_upsample_layer(const layer l, network net)
}
}
-void backward_upsample_layer(const layer l, network net)
+void backward_upsample_layer(const dn_layer l, dn_network net)
{
if(l.reverse){
upsample_cpu(l.delta, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta);
diff --git a/src/upsample_layer.h b/src/upsample_layer.h
index 86790d10883..16469d67acf 100644
--- a/src/upsample_layer.h
+++ b/src/upsample_layer.h
@@ -2,10 +2,10 @@
#define UPSAMPLE_LAYER_H
#include "darknet.h"
-layer make_upsample_layer(int batch, int w, int h, int c, int stride);
-void forward_upsample_layer(const layer l, network net);
-void backward_upsample_layer(const layer l, network net);
-void resize_upsample_layer(layer *l, int w, int h);
+dn_layer make_upsample_layer(int batch, int w, int h, int c, int stride);
+void forward_upsample_layer(const dn_layer l, dn_network net);
+void backward_upsample_layer(const dn_layer l, dn_network net);
+void resize_upsample_layer(dn_layer *l, int w, int h);
#ifdef GPU
void forward_upsample_layer_gpu(const layer l, network net);
diff --git a/src/utils.c b/src/utils.c
index 626b4678c1e..bc9527b9d52 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -56,7 +56,7 @@ int *read_intlist(char *gpu_list, int *ngpus, int d)
return gpus;
}
-int *read_map(char *filename)
+int *read_map(const char *filename)
{
int n = 0;
int *map = 0;
@@ -257,7 +257,7 @@ void error(const char *s)
exit(-1);
}
-unsigned char *read_file(char *filename)
+unsigned char *read_file(const char *filename)
{
FILE *fp = fopen(filename, "rb");
size_t size;
@@ -278,17 +278,17 @@ void malloc_error()
exit(-1);
}
-void file_error(char *s)
+void file_error(const char *s)
{
fprintf(stderr, "Couldn't open file: %s\n", s);
exit(0);
}
-list *split_str(char *s, char delim)
+dn_list *split_str(char *s, char delim)
{
size_t i;
size_t len = strlen(s);
- list *l = make_list();
+ dn_list *l = make_list();
list_insert(l, s);
for(i = 0; i < len; ++i){
if(s[i] == delim){
@@ -427,9 +427,9 @@ char *copy_string(char *s)
return copy;
}
-list *parse_csv_line(char *line)
+dn_list *parse_csv_line(char *line)
{
- list *l = make_list();
+ dn_list *l = make_list();
char *c, *p;
int in = 0;
for(c = line, p = line; *c != '\0'; ++c){
diff --git a/src/utils.h b/src/utils.h
index ef24da79888..b5b3cd2e6a2 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -28,12 +28,12 @@ int read_all_fail(int fd, char *buffer, size_t bytes);
int write_all_fail(int fd, char *buffer, size_t bytes);
void find_replace(char *str, char *orig, char *rep, char *output);
void malloc_error();
-void file_error(char *s);
+void file_error(const char *s);
void strip(char *s);
void strip_char(char *s, char bad);
-list *split_str(char *s, char delim);
+dn_list *split_str(char *s, char delim);
char *fgetl(FILE *fp);
-list *parse_csv_line(char *line);
+dn_list *parse_csv_line(char *line);
char *copy_string(char *s);
int count_fields(char *line);
float *parse_fields(char *line, int n);
diff --git a/src/yolo_layer.c b/src/yolo_layer.c
index c3380363cd3..331328a8d92 100644
--- a/src/yolo_layer.c
+++ b/src/yolo_layer.c
@@ -10,10 +10,10 @@
#include
#include
-layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
+dn_layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
int i;
- layer l = {0};
+ dn_layer l = {0};
l.type = YOLO;
l.n = n;
@@ -60,7 +60,7 @@ layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int
return l;
}
-void resize_yolo_layer(layer *l, int w, int h)
+void resize_yolo_layer(dn_layer *l, int w, int h)
{
l->w = w;
l->h = h;
@@ -80,9 +80,9 @@ void resize_yolo_layer(layer *l, int w, int h)
#endif
}
-box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
+dn_box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
- box b;
+ dn_box b;
b.x = (i + x[index + 0*stride]) / lw;
b.y = (j + x[index + 1*stride]) / lh;
b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
@@ -90,9 +90,9 @@ box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw
return b;
}
-float delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride)
+float delta_yolo_box(dn_box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride)
{
- box pred = get_yolo_box(x, biases, n, index, i, j, lw, lh, w, h, stride);
+ dn_box pred = get_yolo_box(x, biases, n, index, i, j, lw, lh, w, h, stride);
float iou = box_iou(pred, truth);
float tx = (truth.x*lw - i);
@@ -122,14 +122,14 @@ void delta_yolo_class(float *output, float *delta, int index, int class, int cla
}
}
-static int entry_index(layer l, int batch, int location, int entry)
+static int entry_index(dn_layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
}
-void forward_yolo_layer(const layer l, network net)
+void forward_yolo_layer(const dn_layer l, dn_network net)
{
int i,j,b,t,n;
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
@@ -161,11 +161,11 @@ void forward_yolo_layer(const layer l, network net)
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
- box pred = get_yolo_box(l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, net.w, net.h, l.w*l.h);
+ dn_box pred = get_yolo_box(l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, net.w, net.h, l.w*l.h);
float best_iou = 0;
int best_t = 0;
for(t = 0; t < l.max_boxes; ++t){
- box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
if(!truth.x) break;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
@@ -186,24 +186,24 @@ void forward_yolo_layer(const layer l, network net)
if (l.map) class = l.map[class];
int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
delta_yolo_class(l.output, l.delta, class_index, class, l.classes, l.w*l.h, 0);
- box truth = float_to_box(net.truth + best_t*(4 + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + best_t*(4 + 1) + b*l.truths, 1);
delta_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, net.w, net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
}
}
}
}
for(t = 0; t < l.max_boxes; ++t){
- box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
+ dn_box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
if(!truth.x) break;
float best_iou = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
- box truth_shift = truth;
+ dn_box truth_shift = truth;
truth_shift.x = truth_shift.y = 0;
for(n = 0; n < l.total; ++n){
- box pred = {0};
+ dn_box pred = {0};
pred.w = l.biases[2*n]/net.w;
pred.h = l.biases[2*n+1]/net.h;
float iou = box_iou(pred, truth_shift);
@@ -239,7 +239,7 @@ void forward_yolo_layer(const layer l, network net)
printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f, count: %d\n", net.index, avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, recall75/count, count);
}
-void backward_yolo_layer(const layer l, network net)
+void backward_yolo_layer(const dn_layer l, dn_network net)
{
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
}
@@ -257,7 +257,7 @@ void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth
new_w = (w * neth)/h;
}
for (i = 0; i < n; ++i){
- box b = dets[i].bbox;
+ dn_box b = dets[i].bbox;
b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
b.w *= (float)netw/new_w;
@@ -272,7 +272,7 @@ void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth
}
}
-int yolo_num_detections(layer l, float thresh)
+int yolo_num_detections(dn_layer l, float thresh)
{
int i, n;
int count = 0;
@@ -287,7 +287,7 @@ int yolo_num_detections(layer l, float thresh)
return count;
}
-void avg_flipped_yolo(layer l)
+void avg_flipped_yolo(dn_layer l)
{
int i,j,n,z;
float *flip = l.output + l.outputs;
@@ -313,7 +313,7 @@ void avg_flipped_yolo(layer l)
}
}
-int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
+int get_yolo_detections(dn_layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
int i,j,n;
float *predictions = l.output;
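For reference, the box decode touched above in get_yolo_box can be read as the following standalone sketch. The struct only mirrors dn_box's x/y/w/h fields, and the helper takes p already offset to the box's first coordinate (i.e. p == x + index), so this is an illustration of the arithmetic, not the real implementation:

#include <math.h>

struct example_box { float x, y, w, h; };  /* mirrors dn_box's fields */

/* Same arithmetic as get_yolo_box(): (i, j) is the grid cell, (lw, lh) the
 * layer grid size, (w, h) the network input size, biases holds anchor sizes. */
static struct example_box decode_box(const float *p, const float *biases, int n,
                                     int i, int j, int lw, int lh, int w, int h,
                                     int stride)
{
    struct example_box b;
    b.x = (i + p[0*stride]) / lw;                  /* cell index + predicted offset, normalized */
    b.y = (j + p[1*stride]) / lh;
    b.w = exp(p[2*stride]) * biases[2*n]     / w;  /* anchor width scaled by the prediction */
    b.h = exp(p[3*stride]) * biases[2*n + 1] / h;
    return b;
}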
diff --git a/src/yolo_layer.h b/src/yolo_layer.h
index d2a02432681..185f2033591 100644
--- a/src/yolo_layer.h
+++ b/src/yolo_layer.h
@@ -5,11 +5,11 @@
#include "layer.h"
#include "network.h"
-layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes);
-void forward_yolo_layer(const layer l, network net);
-void backward_yolo_layer(const layer l, network net);
-void resize_yolo_layer(layer *l, int w, int h);
-int yolo_num_detections(layer l, float thresh);
+dn_layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes);
+void forward_yolo_layer(const dn_layer l, dn_network net);
+void backward_yolo_layer(const dn_layer l, dn_network net);
+void resize_yolo_layer(dn_layer *l, int w, int h);
+int yolo_num_detections(dn_layer l, float thresh);
#ifdef GPU
void forward_yolo_layer_gpu(const layer l, network net);