Skip to content

Commit

Permalink
saving weight files as binaries, hell yeah
Browse files Browse the repository at this point in the history
  • Loading branch information
pjreddie committed Feb 7, 2015
1 parent bfffadc commit 2f62fe3
Show file tree
Hide file tree
Showing 9 changed files with 147 additions and 47 deletions.
6 changes: 3 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,13 @@ OPTS=-O3
LDFLAGS=`pkg-config --libs opencv` -lm -pthread
COMMON=`pkg-config --cflags opencv` -I/usr/local/cuda/include/
CFLAGS=-Wall -Wfatal-errors
CFLAGS+=$(OPTS)

ifeq ($(DEBUG), 1)
COMMON+=-O0 -g
CFLAGS+=-O0 -g
OPTS=-O0 -g
endif

CFLAGS+=$(OPTS)

ifeq ($(GPU), 1)
COMMON+=-DGPU
CFLAGS+=-DGPU
Expand Down
2 changes: 0 additions & 2 deletions src/connected_layer.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,12 @@ connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVA


float scale = 1./sqrt(inputs);
//scale = .01;
for(i = 0; i < inputs*outputs; ++i){
layer->weights[i] = scale*rand_normal();
}

for(i = 0; i < outputs; ++i){
layer->biases[i] = scale;
// layer->biases[i] = 1;
}

#ifdef GPU
Expand Down
3 changes: 0 additions & 3 deletions src/convolutional_layer.c
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,9 @@ convolutional_layer *make_convolutional_layer(int batch, int h, int w, int c, in
layer->biases = calloc(n, sizeof(float));
layer->bias_updates = calloc(n, sizeof(float));
float scale = 1./sqrt(size*size*c);
//scale = .01;
for(i = 0; i < c*n*size*size; ++i) layer->filters[i] = scale*rand_normal();
for(i = 0; i < n; ++i){
//layer->biases[i] = rand_normal()*scale + scale;
layer->biases[i] = scale;
//layer->biases[i] = 1;
}
int out_h = convolutional_out_height(*layer);
int out_w = convolutional_out_width(*layer);
Expand Down
55 changes: 40 additions & 15 deletions src/darknet.c
Original file line number Diff line number Diff line change
Expand Up @@ -222,13 +222,16 @@ char *basename(char *cfgfile)
return c;
}

void train_imagenet(char *cfgfile)
void train_imagenet(char *cfgfile, char *weightfile)
{
float avg_loss = -1;
srand(time(0));
char *base = basename(cfgfile);
printf("%s\n", base);
network net = parse_network_cfg(cfgfile);
if(weightfile){
load_weights(&net, weightfile);
}
//test_learn_bias(*(convolutional_layer *)net.layers[1]);
//set_learning_network(&net, net.learning_rate, 0, net.decay);
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
Expand Down Expand Up @@ -259,16 +262,19 @@ void train_imagenet(char *cfgfile)
free_data(train);
if(i%100==0){
char buff[256];
sprintf(buff, "/home/pjreddie/imagenet_backup/%s_%d.cfg",base, i);
save_network(net, buff);
sprintf(buff, "/home/pjreddie/imagenet_backup/%s_%d.weights",base, i);
save_weights(net, buff);
}
}
}

void validate_imagenet(char *filename)
void validate_imagenet(char *filename, char *weightfile)
{
int i = 0;
network net = parse_network_cfg(filename);
if(weightfile){
load_weights(&net, weightfile);
}
srand(time(0));

char **labels = get_labels("/home/pjreddie/data/imagenet/cls.val.labels.list");
Expand Down Expand Up @@ -370,14 +376,14 @@ void test_dog(char *cfgfile)
float *X = im.data;
network net = parse_network_cfg(cfgfile);
set_batch_network(&net, 1);
float *predictions = network_predict(net, X);
network_predict(net, X);
image crop = get_network_image_layer(net, 0);
//show_image(crop, "cropped");
// print_image(crop);
//show_image(im, "orig");
show_image(crop, "cropped");
print_image(crop);
show_image(im, "orig");
float * inter = get_network_output(net);
pm(1000, 1, inter);
//cvWaitKey(0);
cvWaitKey(0);
}

void test_imagenet(char *cfgfile)
Expand Down Expand Up @@ -586,7 +592,6 @@ void test_convolutional_layer()
float *in = calloc(size, sizeof(float));
int i;
for(i = 0; i < size; ++i) in[i] = rand_normal();
float *in_gpu = cuda_make_array(in, size);
convolutional_layer layer = *(convolutional_layer *)net.layers[0];
int out_size = convolutional_out_height(layer)*convolutional_out_width(layer)*layer.batch;
cuda_compare(layer.output_gpu, layer.output, out_size, "nothing");
Expand Down Expand Up @@ -703,14 +708,18 @@ void del_arg(int argc, char **argv, int index)
{
int i;
for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
argv[i] = 0;
}

/*
 * Search argv for the flag `arg`. If found, remove it from argv via
 * del_arg (which shifts later entries down and clears the last slot
 * to NULL) and return 1; otherwise return 0.
 *
 * Slots already cleared to NULL by earlier del_arg calls are skipped,
 * so repeated flag lookups on the same argv are safe.
 */
int find_arg(int argc, char* argv[], char *arg)
{
    int i;
    for(i = 0; i < argc; ++i) {
        if(!argv[i]) continue;          /* slot consumed by a previous del_arg */
        if(0==strcmp(argv[i], arg)) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}
Expand All @@ -719,6 +728,7 @@ int find_int_arg(int argc, char **argv, char *arg, int def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atoi(argv[i+1]);
del_arg(argc, argv, i);
Expand All @@ -729,6 +739,20 @@ int find_int_arg(int argc, char **argv, char *arg, int def)
return def;
}

/*
 * Scale the learning rate stored in a saved weight file, in place.
 *
 * The weight-file layout (see save_weights) puts the learning rate as
 * the first float in the file; this rewrites just those 4 bytes with
 * rate * scale, leaving the rest of the file untouched.
 *
 * filename: path to an existing weight file (opened read/write, binary).
 * scale:    multiplier applied to the stored learning rate.
 */
void scale_rate(char *filename, float scale)
{
    FILE *fp = fopen(filename, "r+b");
    if(!fp) file_error(filename);
    float rate = 0;
    /* A short read means the file is empty/truncated — not a weight file. */
    if(fread(&rate, sizeof(float), 1, fp) != 1){
        fclose(fp);
        file_error(filename);
        return;
    }
    printf("Scaling learning rate from %f to %f\n", rate, rate*scale);
    rate = rate*scale;
    fseek(fp, 0, SEEK_SET);
    if(fwrite(&rate, sizeof(float), 1, fp) != 1){
        fprintf(stderr, "Failed to write scaled rate to %s\n", filename);
    }
    fclose(fp);
}

int main(int argc, char **argv)
{
//test_convolutional_layer();
Expand Down Expand Up @@ -765,19 +789,20 @@ int main(int argc, char **argv)
else if(0==strcmp(argv[1], "ctrain")) train_cifar10(argv[2]);
else if(0==strcmp(argv[1], "nist")) train_nist(argv[2]);
else if(0==strcmp(argv[1], "ctest")) test_cifar10(argv[2]);
else if(0==strcmp(argv[1], "train")) train_imagenet(argv[2]);
else if(0==strcmp(argv[1], "train")) train_imagenet(argv[2], (argc > 3)? argv[3] : 0);
//else if(0==strcmp(argv[1], "client")) train_imagenet_distributed(argv[2]);
else if(0==strcmp(argv[1], "detect")) test_detection(argv[2]);
else if(0==strcmp(argv[1], "init")) test_init(argv[2]);
else if(0==strcmp(argv[1], "visualize")) test_visualize(argv[2]);
else if(0==strcmp(argv[1], "valid")) validate_imagenet(argv[2]);
else if(0==strcmp(argv[1], "valid")) validate_imagenet(argv[2], (argc > 3)? argv[3] : 0);
else if(0==strcmp(argv[1], "testnist")) test_nist(argv[2]);
else if(0==strcmp(argv[1], "validetect")) validate_detection_net(argv[2]);
else if(argc < 4){
fprintf(stderr, "usage: %s <function> <filename> <filename>\n", argv[0]);
return 0;
}
else if(0==strcmp(argv[1], "compare")) compare_nist(argv[2], argv[3]);
else if(0==strcmp(argv[1], "scale")) scale_rate(argv[2], atof(argv[3]));
fprintf(stderr, "Success!\n");
return 0;
}
Expand Down
80 changes: 78 additions & 2 deletions src/parser.c
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ convolutional_layer *parse_convolutional(list *options, network *net, int count)
parse_data(weights, layer->filters, c*n*size*size);
parse_data(biases, layer->biases, n);
#ifdef GPU
push_convolutional_layer(*layer);
if(weights || biases) push_convolutional_layer(*layer);
#endif
option_unused(options);
return layer;
Expand Down Expand Up @@ -137,7 +137,7 @@ connected_layer *parse_connected(list *options, network *net, int count)
parse_data(biases, layer->biases, output);
parse_data(weights, layer->weights, input*output);
#ifdef GPU
push_connected_layer(*layer);
if(weights || biases) push_connected_layer(*layer);
#endif
option_unused(options);
return layer;
Expand Down Expand Up @@ -597,6 +597,82 @@ void print_cost_cfg(FILE *fp, cost_layer *l, network net, int count)
fprintf(fp, "\n");
}

/*
 * Serialize the network's learned parameters to a binary weight file.
 *
 * Layout: learning_rate, momentum, decay (floats), seen (int), then for
 * each CONVOLUTIONAL layer its biases followed by its filters, and for
 * each CONNECTED layer its biases followed by its weights, all as raw
 * floats. Must stay in exact sync with load_weights().
 */
void save_weights(network net, char *filename)
{
    printf("Saving weights to %s\n", filename);
    /* "wb", not "w": the payload is raw binary; text mode would corrupt
       it with newline translation on platforms that distinguish modes. */
    FILE *fp = fopen(filename, "wb");
    if(!fp) file_error(filename);

    fwrite(&net.learning_rate, sizeof(float), 1, fp);
    fwrite(&net.momentum, sizeof(float), 1, fp);
    fwrite(&net.decay, sizeof(float), 1, fp);
    fwrite(&net.seen, sizeof(int), 1, fp);

    int i;
    for(i = 0; i < net.n; ++i){
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *) net.layers[i];
#ifdef GPU
            /* Weights live on the GPU during training; copy them back
               to host memory before writing. */
            if(gpu_index >= 0){
                pull_convolutional_layer(layer);
            }
#endif
            int num = layer.n*layer.c*layer.size*layer.size;
            fwrite(layer.biases, sizeof(float), layer.n, fp);
            fwrite(layer.filters, sizeof(float), num, fp);
        }
        if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *) net.layers[i];
#ifdef GPU
            if(gpu_index >= 0){
                pull_connected_layer(layer);
            }
#endif
            fwrite(layer.biases, sizeof(float), layer.outputs, fp);
            fwrite(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
        }
    }
    fclose(fp);
}

/*
 * Load network parameters from a binary weight file written by
 * save_weights(); the read order here mirrors the write order there
 * exactly (hyperparameters, then per-layer biases and filters/weights).
 *
 * NOTE(review): fread return values are not checked, so a truncated
 * file silently yields partially-initialized layers — consider
 * validating the element counts.
 */
void load_weights(network *net, char *filename)
{
    printf("Loading weights from %s\n", filename);
    /* "rb", not "r": binary payload; text mode would mangle the bytes
       on platforms that translate newlines. */
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);

    fread(&net->learning_rate, sizeof(float), 1, fp);
    fread(&net->momentum, sizeof(float), 1, fp);
    fread(&net->decay, sizeof(float), 1, fp);
    fread(&net->seen, sizeof(int), 1, fp);
    /* Propagate the restored hyperparameters to every layer. */
    set_learning_network(net, net->learning_rate, net->momentum, net->decay);

    int i;
    for(i = 0; i < net->n; ++i){
        if(net->types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *) net->layers[i];
            int num = layer.n*layer.c*layer.size*layer.size;
            fread(layer.biases, sizeof(float), layer.n, fp);
            fread(layer.filters, sizeof(float), num, fp);
#ifdef GPU
            /* Mirror the freshly loaded host weights onto the GPU. */
            if(gpu_index >= 0){
                push_convolutional_layer(layer);
            }
#endif
        }
        if(net->types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *) net->layers[i];
            fread(layer.biases, sizeof(float), layer.outputs, fp);
            fread(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_connected_layer(layer);
            }
#endif
        }
    }
    fclose(fp);
}

void save_network(network net, char *filename)
{
Expand Down
2 changes: 2 additions & 0 deletions src/parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,7 @@

network parse_network_cfg(char *filename);
void save_network(network net, char *filename);
void save_weights(network net, char *filename);
void load_weights(network *net, char *filename);

#endif
22 changes: 0 additions & 22 deletions src/server.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,28 +50,6 @@ typedef struct{
network net;
} connection_info;

/* Read exactly `bytes` bytes from fd into buffer, looping over short
   reads. On EOF or error it calls error("read failed") — presumably
   fatal; confirm error()'s semantics in utils.c.
   NOTE(review): duplicate of read_all() in src/utils.c; this server.c
   copy is the one being removed by this commit. */
void read_all(int fd, char *buffer, size_t bytes)
{
//printf("Want %d\n", bytes);
size_t n = 0;
while(n < bytes){
int next = read(fd, buffer + n, bytes-n);
if(next <= 0) error("read failed");
n += next;
}
}

/* Write exactly `bytes` bytes from buffer to fd, looping over short
   writes. On error it calls error("write failed") — presumably fatal;
   confirm error()'s semantics in utils.c.
   NOTE(review): duplicate of write_all() in src/utils.c; this server.c
   copy is the one being removed by this commit. */
void write_all(int fd, char *buffer, size_t bytes)
{
//printf("Writ %d\n", bytes);
size_t n = 0;
while(n < bytes){
int next = write(fd, buffer + n, bytes-n);
if(next <= 0) error("write failed");
n += next;
}
}

void read_and_add_into(int fd, float *a, int n)
{
float *buff = calloc(n, sizeof(float));
Expand Down
22 changes: 22 additions & 0 deletions src/utils.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <float.h>
#include <limits.h>

Expand Down Expand Up @@ -148,6 +149,27 @@ char *fgetl(FILE *fp)
return line;
}

/*
 * Read exactly `bytes` bytes from fd into buffer, looping over short
 * reads. Calls error("read failed") on EOF or error (presumably fatal —
 * confirm error()'s behavior).
 *
 * POSIX read() returns ssize_t; storing it in an int would truncate
 * on very large reads, so keep the full-width type.
 */
void read_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        ssize_t next = read(fd, buffer + n, bytes-n);
        if(next <= 0) error("read failed");
        n += next;
    }
}

/*
 * Write exactly `bytes` bytes from buffer to fd, looping over short
 * writes. Calls error("write failed") on error (presumably fatal —
 * confirm error()'s behavior).
 *
 * BUG FIX: the result of write() must be signed. The original stored
 * it in a size_t, so a -1 error return wrapped to SIZE_MAX, the
 * `next <= 0` check never fired, and `n` wrapped around — write errors
 * were silently missed. POSIX write() returns ssize_t; use it.
 */
void write_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        ssize_t next = write(fd, buffer + n, bytes-n);
        if(next <= 0) error("write failed");
        n += next;
    }
}


char *copy_string(char *s)
{
char *copy = malloc(strlen(s)+1);
Expand Down
2 changes: 2 additions & 0 deletions src/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
#include <time.h>
#include "list.h"

void read_all(int fd, char *buffer, size_t bytes);
void write_all(int fd, char *buffer, size_t bytes);
char *find_replace(char *str, char *orig, char *rep);
void error(const char *s);
void malloc_error();
Expand Down

0 comments on commit 2f62fe3

Please sign in to comment.