I'm experiencing some strange drops in my loss during training, regardless of the solver type; I've tried AdaGrad and SGD (with both the "inv" and "step" learning-rate policies). Does this indicate that something is wrong with Caffe (or with my Caffe build in particular)?
The accuracy of my model is actually somewhat decent with AdaGrad (~60-65% on a complex 2-class problem). I've also tried different learning rates and batch sizes.
Below is my log, together with a plot of the loss and a plot of the accuracy obtained by running each saved snapshot over the entire train set and the entire test set (a rough sketch of that evaluation follows). As you can see, there is a drop in accuracy, but it is not synced with the drop in loss. The learning rate is almost invisible in the plot since it is held constant at 10^(-2).
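For reference, this is roughly how the snapshot evaluation can be done with pycaffe (a minimal sketch, not my exact script; the paths and the batch count are placeholders):

```python
import numpy as np
import caffe

caffe.set_mode_cpu()

# Placeholder names: the real files follow the paths shown in the log below.
MODEL_DEF = 'hdf5_classification/cnn_train.prototxt'
SNAPSHOTS = ['hdf5_classification/data/train_iter_%d.caffemodel' % it
             for it in range(1000, 10001, 1000)]
NUM_BATCHES = 100  # forward passes of batch_size 10 to cover the whole set

def snapshot_accuracy(weights):
    # Instantiating the net in TEST phase picks up the HDF5Data layer that
    # reads test.txt plus the accuracy/pred layers, as in the log below.
    net = caffe.Net(MODEL_DEF, weights, caffe.TEST)
    accs = []
    for _ in range(NUM_BATCHES):
        out = net.forward()                # one batch
        accs.append(out['accuracy'].flat[0])
    return float(np.mean(accs))

for w in SNAPSHOTS:
    print(w, snapshot_accuracy(w))
```

The train-set curve is produced the same way, using a copy of the prototxt whose HDF5Data source points at train.txt instead of test.txt.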
libdc1394 error: Failed to initialize libdc1394
I0420 16:48:02.248317 29735 caffe.cpp:117] Use CPU.
I0420 16:48:02.248450 29735 caffe.cpp:121] Starting Optimization
I0420 16:48:02.248560 29735 solver.cpp:32] Initializing solver from parameters:
test_iter: 100
test_interval: 500
base_lr: 0.01
display: 100
max_iter: 10000
lr_policy: "fixed"
gamma: 0.0001
power: 0.75
weight_decay: 0.0005
snapshot: 1000
snapshot_prefix: "hdf5_classification/data/train"
solver_mode: CPU
net: "hdf5_classification/cnn_train.prototxt"
solver_type: ADAGRAD
I0420 16:48:02.248597 29735 solver.cpp:70] Creating training net from net file: hdf5_classification/cnn_train.prototxt
E0420 16:48:02.248960 29735 upgrade_proto.cpp:618] Attempting to upgrade input file specified using deprecated V1LayerParameter: hdf5_classification/cnn_train.prototxt
I0420 16:48:02.249085 29735 upgrade_proto.cpp:626] Successfully upgraded file specified using deprecated V1LayerParameter
I0420 16:48:02.249176 29735 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer data
I0420 16:48:02.249204 29735 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0420 16:48:02.249217 29735 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer pred
I0420 16:48:02.249336 29735 net.cpp:42] Initializing net from parameters:
name: "CDR-CNN"
state {
phase: TRAIN
}
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "hdf5_classification/data/train.txt"
batch_size: 10
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 12
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 1
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "drop1"
type: "Dropout"
bottom: "conv1"
top: "conv1"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_h: 1
kernel_w: 2
stride_h: 1
stride_w: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 1
kernel_w: 11
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "drop2"
type: "Dropout"
bottom: "conv2"
top: "conv2"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "conv2"
top: "conv3"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 110
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 7
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "drop3"
type: "Dropout"
bottom: "conv3"
top: "conv3"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc4"
type: "InnerProduct"
bottom: "conv3"
top: "fc4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 90
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "fc4"
top: "fc4"
}
layer {
name: "drop4"
type: "Dropout"
bottom: "fc4"
top: "fc4"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc5"
type: "InnerProduct"
bottom: "fc4"
top: "fc5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc5"
bottom: "label"
top: "loss"
include {
phase: TRAIN
}
}
I0420 16:48:02.249445 29735 layer_factory.hpp:74] Creating layer data
I0420 16:48:02.249480 29735 net.cpp:84] Creating Layer data
I0420 16:48:02.249503 29735 net.cpp:338] data -> data
I0420 16:48:02.249546 29735 net.cpp:338] data -> label
I0420 16:48:02.249567 29735 net.cpp:113] Setting up data
I0420 16:48:02.249583 29735 hdf5_data_layer.cpp:80] Loading list of HDF5 filenames from: hdf5_classification/data/train.txt
I0420 16:48:02.249652 29735 hdf5_data_layer.cpp:94] Number of HDF5 files: 1
I0420 16:48:02.336033 29735 net.cpp:120] Top shape: 10 16 7 24 (26880)
I0420 16:48:02.336078 29735 net.cpp:120] Top shape: 10 (10)
I0420 16:48:02.336096 29735 layer_factory.hpp:74] Creating layer conv1
I0420 16:48:02.336134 29735 net.cpp:84] Creating Layer conv1
I0420 16:48:02.336148 29735 net.cpp:380] conv1 <- data
I0420 16:48:02.336175 29735 net.cpp:338] conv1 -> conv1
I0420 16:48:02.336197 29735 net.cpp:113] Setting up conv1
I0420 16:48:02.336660 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.336685 29735 layer_factory.hpp:74] Creating layer relu1
I0420 16:48:02.336704 29735 net.cpp:84] Creating Layer relu1
I0420 16:48:02.336714 29735 net.cpp:380] relu1 <- conv1
I0420 16:48:02.336725 29735 net.cpp:327] relu1 -> conv1 (in-place)
I0420 16:48:02.336738 29735 net.cpp:113] Setting up relu1
I0420 16:48:02.336760 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.336772 29735 layer_factory.hpp:74] Creating layer drop1
I0420 16:48:02.336796 29735 net.cpp:84] Creating Layer drop1
I0420 16:48:02.336807 29735 net.cpp:380] drop1 <- conv1
I0420 16:48:02.336822 29735 net.cpp:327] drop1 -> conv1 (in-place)
I0420 16:48:02.336835 29735 net.cpp:113] Setting up drop1
I0420 16:48:02.336860 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.336871 29735 layer_factory.hpp:74] Creating layer pool1
I0420 16:48:02.336885 29735 net.cpp:84] Creating Layer pool1
I0420 16:48:02.336895 29735 net.cpp:380] pool1 <- conv1
I0420 16:48:02.336906 29735 net.cpp:338] pool1 -> pool1
I0420 16:48:02.336918 29735 net.cpp:113] Setting up pool1
I0420 16:48:02.336949 29735 net.cpp:120] Top shape: 10 12 7 11 (9240)
I0420 16:48:02.336961 29735 layer_factory.hpp:74] Creating layer conv2
I0420 16:48:02.336978 29735 net.cpp:84] Creating Layer conv2
I0420 16:48:02.336988 29735 net.cpp:380] conv2 <- pool1
I0420 16:48:02.337003 29735 net.cpp:338] conv2 -> conv2
I0420 16:48:02.337018 29735 net.cpp:113] Setting up conv2
I0420 16:48:02.337132 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.337152 29735 layer_factory.hpp:74] Creating layer relu2
I0420 16:48:02.337167 29735 net.cpp:84] Creating Layer relu2
I0420 16:48:02.337177 29735 net.cpp:380] relu2 <- conv2
I0420 16:48:02.337188 29735 net.cpp:327] relu2 -> conv2 (in-place)
I0420 16:48:02.337199 29735 net.cpp:113] Setting up relu2
I0420 16:48:02.337211 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.337221 29735 layer_factory.hpp:74] Creating layer drop2
I0420 16:48:02.337234 29735 net.cpp:84] Creating Layer drop2
I0420 16:48:02.337244 29735 net.cpp:380] drop2 <- conv2
I0420 16:48:02.337255 29735 net.cpp:327] drop2 -> conv2 (in-place)
I0420 16:48:02.337266 29735 net.cpp:113] Setting up drop2
I0420 16:48:02.337280 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.337290 29735 layer_factory.hpp:74] Creating layer conv3
I0420 16:48:02.337311 29735 net.cpp:84] Creating Layer conv3
I0420 16:48:02.337321 29735 net.cpp:380] conv3 <- conv2
I0420 16:48:02.337333 29735 net.cpp:338] conv3 -> conv3
I0420 16:48:02.337347 29735 net.cpp:113] Setting up conv3
I0420 16:48:02.337903 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.337919 29735 layer_factory.hpp:74] Creating layer relu3
I0420 16:48:02.337949 29735 net.cpp:84] Creating Layer relu3
I0420 16:48:02.337959 29735 net.cpp:380] relu3 <- conv3
I0420 16:48:02.337970 29735 net.cpp:327] relu3 -> conv3 (in-place)
I0420 16:48:02.337982 29735 net.cpp:113] Setting up relu3
I0420 16:48:02.337993 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.338003 29735 layer_factory.hpp:74] Creating layer drop3
I0420 16:48:02.338014 29735 net.cpp:84] Creating Layer drop3
I0420 16:48:02.338024 29735 net.cpp:380] drop3 <- conv3
I0420 16:48:02.338038 29735 net.cpp:327] drop3 -> conv3 (in-place)
I0420 16:48:02.338050 29735 net.cpp:113] Setting up drop3
I0420 16:48:02.338063 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.338073 29735 layer_factory.hpp:74] Creating layer fc4
I0420 16:48:02.338096 29735 net.cpp:84] Creating Layer fc4
I0420 16:48:02.338107 29735 net.cpp:380] fc4 <- conv3
I0420 16:48:02.338124 29735 net.cpp:338] fc4 -> fc4
I0420 16:48:02.338136 29735 net.cpp:113] Setting up fc4
I0420 16:48:02.338506 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.338522 29735 layer_factory.hpp:74] Creating layer relu4
I0420 16:48:02.338537 29735 net.cpp:84] Creating Layer relu4
I0420 16:48:02.338548 29735 net.cpp:380] relu4 <- fc4
I0420 16:48:02.338559 29735 net.cpp:327] relu4 -> fc4 (in-place)
I0420 16:48:02.338572 29735 net.cpp:113] Setting up relu4
I0420 16:48:02.338582 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.338593 29735 layer_factory.hpp:74] Creating layer drop4
I0420 16:48:02.338603 29735 net.cpp:84] Creating Layer drop4
I0420 16:48:02.338613 29735 net.cpp:380] drop4 <- fc4
I0420 16:48:02.338628 29735 net.cpp:327] drop4 -> fc4 (in-place)
I0420 16:48:02.338639 29735 net.cpp:113] Setting up drop4
I0420 16:48:02.338651 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.338661 29735 layer_factory.hpp:74] Creating layer fc5
I0420 16:48:02.338675 29735 net.cpp:84] Creating Layer fc5
I0420 16:48:02.338685 29735 net.cpp:380] fc5 <- fc4
I0420 16:48:02.338698 29735 net.cpp:338] fc5 -> fc5
I0420 16:48:02.338711 29735 net.cpp:113] Setting up fc5
I0420 16:48:02.338737 29735 net.cpp:120] Top shape: 10 2 (20)
I0420 16:48:02.338752 29735 layer_factory.hpp:74] Creating layer loss
I0420 16:48:02.338775 29735 net.cpp:84] Creating Layer loss
I0420 16:48:02.338786 29735 net.cpp:380] loss <- fc5
I0420 16:48:02.338798 29735 net.cpp:380] loss <- label
I0420 16:48:02.338810 29735 net.cpp:338] loss -> loss
I0420 16:48:02.338834 29735 net.cpp:113] Setting up loss
I0420 16:48:02.338855 29735 layer_factory.hpp:74] Creating layer loss
I0420 16:48:02.338891 29735 net.cpp:120] Top shape: (1)
I0420 16:48:02.338901 29735 net.cpp:122] with loss weight 1
I0420 16:48:02.338937 29735 net.cpp:167] loss needs backward computation.
I0420 16:48:02.338948 29735 net.cpp:167] fc5 needs backward computation.
I0420 16:48:02.338958 29735 net.cpp:167] drop4 needs backward computation.
I0420 16:48:02.338966 29735 net.cpp:167] relu4 needs backward computation.
I0420 16:48:02.338975 29735 net.cpp:167] fc4 needs backward computation.
I0420 16:48:02.338985 29735 net.cpp:167] drop3 needs backward computation.
I0420 16:48:02.338994 29735 net.cpp:167] relu3 needs backward computation.
I0420 16:48:02.339004 29735 net.cpp:167] conv3 needs backward computation.
I0420 16:48:02.339012 29735 net.cpp:167] drop2 needs backward computation.
I0420 16:48:02.339021 29735 net.cpp:167] relu2 needs backward computation.
I0420 16:48:02.339030 29735 net.cpp:167] conv2 needs backward computation.
I0420 16:48:02.339040 29735 net.cpp:167] pool1 needs backward computation.
I0420 16:48:02.339049 29735 net.cpp:167] drop1 needs backward computation.
I0420 16:48:02.339058 29735 net.cpp:167] relu1 needs backward computation.
I0420 16:48:02.339067 29735 net.cpp:167] conv1 needs backward computation.
I0420 16:48:02.339076 29735 net.cpp:169] data does not need backward computation.
I0420 16:48:02.339085 29735 net.cpp:205] This network produces output loss
I0420 16:48:02.339102 29735 net.cpp:447] Collecting Learning Rate and Weight Decay.
I0420 16:48:02.339114 29735 net.cpp:217] Network initialization done.
I0420 16:48:02.339128 29735 net.cpp:218] Memory required for data: 407164
E0420 16:48:02.339588 29735 upgrade_proto.cpp:618] Attempting to upgrade input file specified using deprecated V1LayerParameter: hdf5_classification/cnn_train.prototxt
I0420 16:48:02.339651 29735 upgrade_proto.cpp:626] Successfully upgraded file specified using deprecated V1LayerParameter
I0420 16:48:02.339681 29735 solver.cpp:154] Creating test net (#0) specified by net file: hdf5_classification/cnn_train.prototxt
I0420 16:48:02.339715 29735 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer data
I0420 16:48:02.339736 29735 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer loss
I0420 16:48:02.339872 29735 net.cpp:42] Initializing net from parameters:
name: "CDR-CNN"
state {
phase: TEST
}
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TEST
}
hdf5_data_param {
source: "hdf5_classification/data/test.txt"
batch_size: 10
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 12
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 1
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "drop1"
type: "Dropout"
bottom: "conv1"
top: "conv1"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_h: 1
kernel_w: 2
stride_h: 1
stride_w: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 1
kernel_w: 11
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "drop2"
type: "Dropout"
bottom: "conv2"
top: "conv2"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "conv2"
top: "conv3"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 110
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
kernel_h: 7
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "drop3"
type: "Dropout"
bottom: "conv3"
top: "conv3"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc4"
type: "InnerProduct"
bottom: "conv3"
top: "fc4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 90
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "fc4"
top: "fc4"
}
layer {
name: "drop4"
type: "Dropout"
bottom: "fc4"
top: "fc4"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc5"
type: "InnerProduct"
bottom: "fc4"
top: "fc5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc5"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "pred"
type: "Softmax"
bottom: "fc5"
top: "pred"
include {
phase: TEST
}
}
I0420 16:48:02.339997 29735 layer_factory.hpp:74] Creating layer data
I0420 16:48:02.340023 29735 net.cpp:84] Creating Layer data
I0420 16:48:02.340034 29735 net.cpp:338] data -> data
I0420 16:48:02.340049 29735 net.cpp:338] data -> label
I0420 16:48:02.340064 29735 net.cpp:113] Setting up data
I0420 16:48:02.340073 29735 hdf5_data_layer.cpp:80] Loading list of HDF5 filenames from: hdf5_classification/data/test.txt
I0420 16:48:02.340106 29735 hdf5_data_layer.cpp:94] Number of HDF5 files: 1
I0420 16:48:02.364920 29735 net.cpp:120] Top shape: 10 16 7 24 (26880)
I0420 16:48:02.364967 29735 net.cpp:120] Top shape: 10 (10)
I0420 16:48:02.364984 29735 layer_factory.hpp:74] Creating layer conv1
I0420 16:48:02.365010 29735 net.cpp:84] Creating Layer conv1
I0420 16:48:02.365021 29735 net.cpp:380] conv1 <- data
I0420 16:48:02.365038 29735 net.cpp:338] conv1 -> conv1
I0420 16:48:02.365058 29735 net.cpp:113] Setting up conv1
I0420 16:48:02.365110 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.365128 29735 layer_factory.hpp:74] Creating layer relu1
I0420 16:48:02.365144 29735 net.cpp:84] Creating Layer relu1
I0420 16:48:02.365154 29735 net.cpp:380] relu1 <- conv1
I0420 16:48:02.365166 29735 net.cpp:327] relu1 -> conv1 (in-place)
I0420 16:48:02.365178 29735 net.cpp:113] Setting up relu1
I0420 16:48:02.365191 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.365201 29735 layer_factory.hpp:74] Creating layer drop1
I0420 16:48:02.365214 29735 net.cpp:84] Creating Layer drop1
I0420 16:48:02.365226 29735 net.cpp:380] drop1 <- conv1
I0420 16:48:02.365237 29735 net.cpp:327] drop1 -> conv1 (in-place)
I0420 16:48:02.365249 29735 net.cpp:113] Setting up drop1
I0420 16:48:02.365264 29735 net.cpp:120] Top shape: 10 12 7 22 (18480)
I0420 16:48:02.365274 29735 layer_factory.hpp:74] Creating layer pool1
I0420 16:48:02.365288 29735 net.cpp:84] Creating Layer pool1
I0420 16:48:02.365299 29735 net.cpp:380] pool1 <- conv1
I0420 16:48:02.365311 29735 net.cpp:338] pool1 -> pool1
I0420 16:48:02.365324 29735 net.cpp:113] Setting up pool1
I0420 16:48:02.365340 29735 net.cpp:120] Top shape: 10 12 7 11 (9240)
I0420 16:48:02.365351 29735 layer_factory.hpp:74] Creating layer conv2
I0420 16:48:02.365365 29735 net.cpp:84] Creating Layer conv2
I0420 16:48:02.365375 29735 net.cpp:380] conv2 <- pool1
I0420 16:48:02.365388 29735 net.cpp:338] conv2 -> conv2
I0420 16:48:02.365402 29735 net.cpp:113] Setting up conv2
I0420 16:48:02.365514 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.365530 29735 layer_factory.hpp:74] Creating layer relu2
I0420 16:48:02.365542 29735 net.cpp:84] Creating Layer relu2
I0420 16:48:02.365552 29735 net.cpp:380] relu2 <- conv2
I0420 16:48:02.365564 29735 net.cpp:327] relu2 -> conv2 (in-place)
I0420 16:48:02.365577 29735 net.cpp:113] Setting up relu2
I0420 16:48:02.365589 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.365599 29735 layer_factory.hpp:74] Creating layer drop2
I0420 16:48:02.365612 29735 net.cpp:84] Creating Layer drop2
I0420 16:48:02.365622 29735 net.cpp:380] drop2 <- conv2
I0420 16:48:02.365633 29735 net.cpp:327] drop2 -> conv2 (in-place)
I0420 16:48:02.365645 29735 net.cpp:113] Setting up drop2
I0420 16:48:02.365659 29735 net.cpp:120] Top shape: 10 20 7 1 (1400)
I0420 16:48:02.365669 29735 layer_factory.hpp:74] Creating layer conv3
I0420 16:48:02.365684 29735 net.cpp:84] Creating Layer conv3
I0420 16:48:02.365694 29735 net.cpp:380] conv3 <- conv2
I0420 16:48:02.365706 29735 net.cpp:338] conv3 -> conv3
I0420 16:48:02.365720 29735 net.cpp:113] Setting up conv3
I0420 16:48:02.366268 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.366286 29735 layer_factory.hpp:74] Creating layer relu3
I0420 16:48:02.366299 29735 net.cpp:84] Creating Layer relu3
I0420 16:48:02.366309 29735 net.cpp:380] relu3 <- conv3
I0420 16:48:02.366322 29735 net.cpp:327] relu3 -> conv3 (in-place)
I0420 16:48:02.366334 29735 net.cpp:113] Setting up relu3
I0420 16:48:02.366346 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.366356 29735 layer_factory.hpp:74] Creating layer drop3
I0420 16:48:02.366369 29735 net.cpp:84] Creating Layer drop3
I0420 16:48:02.366394 29735 net.cpp:380] drop3 <- conv3
I0420 16:48:02.366405 29735 net.cpp:327] drop3 -> conv3 (in-place)
I0420 16:48:02.366418 29735 net.cpp:113] Setting up drop3
I0420 16:48:02.366432 29735 net.cpp:120] Top shape: 10 110 1 1 (1100)
I0420 16:48:02.366442 29735 layer_factory.hpp:74] Creating layer fc4
I0420 16:48:02.366457 29735 net.cpp:84] Creating Layer fc4
I0420 16:48:02.366468 29735 net.cpp:380] fc4 <- conv3
I0420 16:48:02.366480 29735 net.cpp:338] fc4 -> fc4
I0420 16:48:02.366494 29735 net.cpp:113] Setting up fc4
I0420 16:48:02.366855 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.366870 29735 layer_factory.hpp:74] Creating layer relu4
I0420 16:48:02.366883 29735 net.cpp:84] Creating Layer relu4
I0420 16:48:02.366894 29735 net.cpp:380] relu4 <- fc4
I0420 16:48:02.366906 29735 net.cpp:327] relu4 -> fc4 (in-place)
I0420 16:48:02.366919 29735 net.cpp:113] Setting up relu4
I0420 16:48:02.366930 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.366940 29735 layer_factory.hpp:74] Creating layer drop4
I0420 16:48:02.366952 29735 net.cpp:84] Creating Layer drop4
I0420 16:48:02.366962 29735 net.cpp:380] drop4 <- fc4
I0420 16:48:02.366974 29735 net.cpp:327] drop4 -> fc4 (in-place)
I0420 16:48:02.366986 29735 net.cpp:113] Setting up drop4
I0420 16:48:02.366999 29735 net.cpp:120] Top shape: 10 90 (900)
I0420 16:48:02.367010 29735 layer_factory.hpp:74] Creating layer fc5
I0420 16:48:02.367023 29735 net.cpp:84] Creating Layer fc5
I0420 16:48:02.367034 29735 net.cpp:380] fc5 <- fc4
I0420 16:48:02.367048 29735 net.cpp:338] fc5 -> fc5
I0420 16:48:02.367060 29735 net.cpp:113] Setting up fc5
I0420 16:48:02.367084 29735 net.cpp:120] Top shape: 10 2 (20)
I0420 16:48:02.367100 29735 layer_factory.hpp:74] Creating layer fc5_fc5_0_split
I0420 16:48:02.367120 29735 net.cpp:84] Creating Layer fc5_fc5_0_split
I0420 16:48:02.367130 29735 net.cpp:380] fc5_fc5_0_split <- fc5
I0420 16:48:02.367142 29735 net.cpp:338] fc5_fc5_0_split -> fc5_fc5_0_split_0
I0420 16:48:02.367156 29735 net.cpp:338] fc5_fc5_0_split -> fc5_fc5_0_split_1
I0420 16:48:02.367169 29735 net.cpp:113] Setting up fc5_fc5_0_split
I0420 16:48:02.367183 29735 net.cpp:120] Top shape: 10 2 (20)
I0420 16:48:02.367194 29735 net.cpp:120] Top shape: 10 2 (20)
I0420 16:48:02.367204 29735 layer_factory.hpp:74] Creating layer accuracy
I0420 16:48:02.367223 29735 net.cpp:84] Creating Layer accuracy
I0420 16:48:02.367233 29735 net.cpp:380] accuracy <- fc5_fc5_0_split_0
I0420 16:48:02.367244 29735 net.cpp:380] accuracy <- label
I0420 16:48:02.367257 29735 net.cpp:338] accuracy -> accuracy
I0420 16:48:02.367271 29735 net.cpp:113] Setting up accuracy
I0420 16:48:02.367290 29735 net.cpp:120] Top shape: (1)
I0420 16:48:02.367300 29735 layer_factory.hpp:74] Creating layer pred
I0420 16:48:02.367313 29735 net.cpp:84] Creating Layer pred
I0420 16:48:02.367324 29735 net.cpp:380] pred <- fc5_fc5_0_split_1
I0420 16:48:02.367336 29735 net.cpp:338] pred -> pred
I0420 16:48:02.367348 29735 net.cpp:113] Setting up pred
I0420 16:48:02.367363 29735 net.cpp:120] Top shape: 10 2 (20)
I0420 16:48:02.367374 29735 net.cpp:169] pred does not need backward computation.
I0420 16:48:02.367384 29735 net.cpp:169] accuracy does not need backward computation.
I0420 16:48:02.367393 29735 net.cpp:169] fc5_fc5_0_split does not need backward computation.
I0420 16:48:02.367403 29735 net.cpp:169] fc5 does not need backward computation.
I0420 16:48:02.367413 29735 net.cpp:169] drop4 does not need backward computation.
I0420 16:48:02.367421 29735 net.cpp:169] relu4 does not need backward computation.
I0420 16:48:02.367430 29735 net.cpp:169] fc4 does not need backward computation.
I0420 16:48:02.367439 29735 net.cpp:169] drop3 does not need backward computation.
I0420 16:48:02.367449 29735 net.cpp:169] relu3 does not need backward computation.
I0420 16:48:02.367458 29735 net.cpp:169] conv3 does not need backward computation.
I0420 16:48:02.367467 29735 net.cpp:169] drop2 does not need backward computation.
I0420 16:48:02.367476 29735 net.cpp:169] relu2 does not need backward computation.
I0420 16:48:02.367491 29735 net.cpp:169] conv2 does not need backward computation.
I0420 16:48:02.367501 29735 net.cpp:169] pool1 does not need backward computation.
I0420 16:48:02.367511 29735 net.cpp:169] drop1 does not need backward computation.
I0420 16:48:02.367521 29735 net.cpp:169] relu1 does not need backward computation.
I0420 16:48:02.367529 29735 net.cpp:169] conv1 does not need backward computation.
I0420 16:48:02.367538 29735 net.cpp:169] data does not need backward computation.
I0420 16:48:02.367547 29735 net.cpp:205] This network produces output accuracy
I0420 16:48:02.367558 29735 net.cpp:205] This network produces output pred
I0420 16:48:02.367574 29735 net.cpp:447] Collecting Learning Rate and Weight Decay.
I0420 16:48:02.367588 29735 net.cpp:217] Network initialization done.
I0420 16:48:02.367597 29735 net.cpp:218] Memory required for data: 407404
I0420 16:48:02.367712 29735 solver.cpp:42] Solver scaffolding done.
I0420 16:48:02.367749 29735 solver.cpp:222] Solving CDR-CNN
I0420 16:48:02.367761 29735 solver.cpp:223] Learning Rate Policy: fixed
I0420 16:48:02.367776 29735 solver.cpp:266] Iteration 0, Testing net (#0)
I0420 16:48:02.492290 29735 solver.cpp:315] Test net output #0: accuracy = 0.481
I0420 16:48:02.492362 29735 solver.cpp:315] Test net output #1: pred = 0.5
I0420 16:48:02.492375 29735 solver.cpp:315] Test net output #2: pred = 0.5
I0420 16:48:02.492388 29735 solver.cpp:315] Test net output #3: pred = 0.5
I0420 16:48:02.492401 29735 solver.cpp:315] Test net output #4: pred = 0.5
I0420 16:48:02.492413 29735 solver.cpp:315] Test net output #5: pred = 0.5
I0420 16:48:02.492425 29735 solver.cpp:315] Test net output #6: pred = 0.5
I0420 16:48:02.492439 29735 solver.cpp:315] Test net output #7: pred = 0.5
I0420 16:48:02.492450 29735 solver.cpp:315] Test net output #8: pred = 0.5
I0420 16:48:02.492463 29735 solver.cpp:315] Test net output #9: pred = 0.5
I0420 16:48:02.492475 29735 solver.cpp:315] Test net output #10: pred = 0.5
I0420 16:48:02.492487 29735 solver.cpp:315] Test net output #11: pred = 0.5
I0420 16:48:02.492501 29735 solver.cpp:315] Test net output #12: pred = 0.5
I0420 16:48:02.492512 29735 solver.cpp:315] Test net output #13: pred = 0.5
I0420 16:48:02.492526 29735 solver.cpp:315] Test net output #14: pred = 0.5
I0420 16:48:02.492537 29735 solver.cpp:315] Test net output #15: pred = 0.5
I0420 16:48:02.492549 29735 solver.cpp:315] Test net output #16: pred = 0.5
I0420 16:48:02.492563 29735 solver.cpp:315] Test net output #17: pred = 0.5
I0420 16:48:02.492574 29735 solver.cpp:315] Test net output #18: pred = 0.5
I0420 16:48:02.492586 29735 solver.cpp:315] Test net output #19: pred = 0.5
I0420 16:48:02.492599 29735 solver.cpp:315] Test net output #20: pred = 0.5
I0420 16:48:02.496176 29735 solver.cpp:189] Iteration 0, loss = 0.693148
I0420 16:48:02.496206 29735 solver.cpp:204] Train net output #0: loss = 0.693148 (* 1 = 0.693148 loss)
I0420 16:48:02.496223 29735 solver.cpp:697] Iteration 0, lr = 0.01
I0420 16:48:04.232394 29735 solver.cpp:189] Iteration 100, loss = 0.673534
I0420 16:48:04.232467 29735 solver.cpp:204] Train net output #0: loss = 0.673534 (* 1 = 0.673534 loss)
I0420 16:48:04.232481 29735 solver.cpp:697] Iteration 100, lr = 0.01
I0420 16:48:07.784406 29735 solver.cpp:189] Iteration 200, loss = 0.681191
I0420 16:48:07.784482 29735 solver.cpp:204] Train net output #0: loss = 0.681191 (* 1 = 0.681191 loss)
I0420 16:48:07.784497 29735 solver.cpp:697] Iteration 200, lr = 0.01
I0420 16:48:11.983409 29735 solver.cpp:189] Iteration 300, loss = 0.608198
I0420 16:48:11.983487 29735 solver.cpp:204] Train net output #0: loss = 0.608198 (* 1 = 0.608198 loss)
I0420 16:48:11.983502 29735 solver.cpp:697] Iteration 300, lr = 0.01
I0420 16:48:16.403545 29735 solver.cpp:189] Iteration 400, loss = 0.596086
I0420 16:48:16.403620 29735 solver.cpp:204] Train net output #0: loss = 0.596086 (* 1 = 0.596086 loss)
I0420 16:48:16.403636 29735 solver.cpp:697] Iteration 400, lr = 0.01
I0420 16:48:20.881836 29735 solver.cpp:266] Iteration 500, Testing net (#0)
I0420 16:48:23.063827 29735 solver.cpp:315] Test net output #0: accuracy = 0.575
I0420 16:48:23.063899 29735 solver.cpp:315] Test net output #1: pred = 0.59188
I0420 16:48:23.063913 29735 solver.cpp:315] Test net output #2: pred = 0.408119
I0420 16:48:23.063925 29735 solver.cpp:315] Test net output #3: pred = 0.578908
I0420 16:48:23.063937 29735 solver.cpp:315] Test net output #4: pred = 0.421092
I0420 16:48:23.063949 29735 solver.cpp:315] Test net output #5: pred = 0.57543
I0420 16:48:23.063977 29735 solver.cpp:315] Test net output #6: pred = 0.42457
I0420 16:48:23.063990 29735 solver.cpp:315] Test net output #7: pred = 0.582379
I0420 16:48:23.064002 29735 solver.cpp:315] Test net output #8: pred = 0.417621
I0420 16:48:23.064014 29735 solver.cpp:315] Test net output #9: pred = 0.570022
I0420 16:48:23.064026 29735 solver.cpp:315] Test net output #10: pred = 0.429978
I0420 16:48:23.064038 29735 solver.cpp:315] Test net output #11: pred = 0.575581
I0420 16:48:23.064050 29735 solver.cpp:315] Test net output #12: pred = 0.424419
I0420 16:48:23.064062 29735 solver.cpp:315] Test net output #13: pred = 0.573376
I0420 16:48:23.064074 29735 solver.cpp:315] Test net output #14: pred = 0.426625
I0420 16:48:23.064086 29735 solver.cpp:315] Test net output #15: pred = 0.569459
I0420 16:48:23.064098 29735 solver.cpp:315] Test net output #16: pred = 0.430541
I0420 16:48:23.064110 29735 solver.cpp:315] Test net output #17: pred = 0.596212
I0420 16:48:23.064122 29735 solver.cpp:315] Test net output #18: pred = 0.403788
I0420 16:48:23.064134 29735 solver.cpp:315] Test net output #19: pred = 0.587882
I0420 16:48:23.064147 29735 solver.cpp:315] Test net output #20: pred = 0.412118
I0420 16:48:23.105515 29735 solver.cpp:189] Iteration 500, loss = 0.539311
I0420 16:48:23.105557 29735 solver.cpp:204] Train net output #0: loss = 0.539311 (* 1 = 0.539311 loss)
I0420 16:48:23.105573 29735 solver.cpp:697] Iteration 500, lr = 0.01
I0420 16:48:27.680073 29735 solver.cpp:189] Iteration 600, loss = 0.651605
I0420 16:48:27.681032 29735 solver.cpp:204] Train net output #0: loss = 0.651605 (* 1 = 0.651605 loss)
I0420 16:48:27.681052 29735 solver.cpp:697] Iteration 600, lr = 0.01
I0420 16:48:32.307653 29735 solver.cpp:189] Iteration 700, loss = 0.582182
I0420 16:48:32.307711 29735 solver.cpp:204] Train net output #0: loss = 0.582182 (* 1 = 0.582182 loss)
I0420 16:48:32.307726 29735 solver.cpp:697] Iteration 700, lr = 0.01
I0420 16:48:36.909667 29735 solver.cpp:189] Iteration 800, loss = 0.686645
I0420 16:48:36.909744 29735 solver.cpp:204] Train net output #0: loss = 0.686645 (* 1 = 0.686645 loss)
I0420 16:48:36.909759 29735 solver.cpp:697] Iteration 800, lr = 0.01
I0420 16:48:41.314266 29735 solver.cpp:189] Iteration 900, loss = 0.674465
I0420 16:48:41.314347 29735 solver.cpp:204] Train net output #0: loss = 0.674465 (* 1 = 0.674465 loss)
I0420 16:48:41.314363 29735 solver.cpp:697] Iteration 900, lr = 0.01
I0420 16:48:45.777344 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_1000.caffemodel
I0420 16:48:45.778039 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_1000.solverstate
I0420 16:48:45.778370 29735 solver.cpp:266] Iteration 1000, Testing net (#0)
I0420 16:48:48.014457 29735 solver.cpp:315] Test net output #0: accuracy = 0.616
I0420 16:48:48.014526 29735 solver.cpp:315] Test net output #1: pred = 0.571863
I0420 16:48:48.014540 29735 solver.cpp:315] Test net output #2: pred = 0.428137
I0420 16:48:48.014552 29735 solver.cpp:315] Test net output #3: pred = 0.584536
I0420 16:48:48.014564 29735 solver.cpp:315] Test net output #4: pred = 0.415464
I0420 16:48:48.014576 29735 solver.cpp:315] Test net output #5: pred = 0.563226
I0420 16:48:48.014588 29735 solver.cpp:315] Test net output #6: pred = 0.436774
I0420 16:48:48.014600 29735 solver.cpp:315] Test net output #7: pred = 0.565195
I0420 16:48:48.014611 29735 solver.cpp:315] Test net output #8: pred = 0.434805
I0420 16:48:48.014642 29735 solver.cpp:315] Test net output #9: pred = 0.55471
I0420 16:48:48.014655 29735 solver.cpp:315] Test net output #10: pred = 0.44529
I0420 16:48:48.014667 29735 solver.cpp:315] Test net output #11: pred = 0.563331
I0420 16:48:48.014678 29735 solver.cpp:315] Test net output #12: pred = 0.436669
I0420 16:48:48.014689 29735 solver.cpp:315] Test net output #13: pred = 0.549899
I0420 16:48:48.014701 29735 solver.cpp:315] Test net output #14: pred = 0.450102
I0420 16:48:48.014713 29735 solver.cpp:315] Test net output #15: pred = 0.557282
I0420 16:48:48.014724 29735 solver.cpp:315] Test net output #16: pred = 0.442718
I0420 16:48:48.014736 29735 solver.cpp:315] Test net output #17: pred = 0.564575
I0420 16:48:48.014747 29735 solver.cpp:315] Test net output #18: pred = 0.435425
I0420 16:48:48.014760 29735 solver.cpp:315] Test net output #19: pred = 0.563374
I0420 16:48:48.014770 29735 solver.cpp:315] Test net output #20: pred = 0.436626
I0420 16:48:48.054803 29735 solver.cpp:189] Iteration 1000, loss = 0.636793
I0420 16:48:48.054833 29735 solver.cpp:204] Train net output #0: loss = 0.636793 (* 1 = 0.636793 loss)
I0420 16:48:48.054848 29735 solver.cpp:697] Iteration 1000, lr = 0.01
I0420 16:48:52.592427 29735 solver.cpp:189] Iteration 1100, loss = 0.428557
I0420 16:48:52.592504 29735 solver.cpp:204] Train net output #0: loss = 0.428557 (* 1 = 0.428557 loss)
I0420 16:48:52.592517 29735 solver.cpp:697] Iteration 1100, lr = 0.01
I0420 16:48:56.929852 29735 solver.cpp:189] Iteration 1200, loss = 0.121006
I0420 16:48:56.930611 29735 solver.cpp:204] Train net output #0: loss = 0.121006 (* 1 = 0.121006 loss)
I0420 16:48:56.930632 29735 solver.cpp:697] Iteration 1200, lr = 0.01
I0420 16:49:01.258540 29735 solver.cpp:189] Iteration 1300, loss = 0.682049
I0420 16:49:01.258607 29735 solver.cpp:204] Train net output #0: loss = 0.682049 (* 1 = 0.682049 loss)
I0420 16:49:01.258623 29735 solver.cpp:697] Iteration 1300, lr = 0.01
I0420 16:49:05.715176 29735 solver.cpp:189] Iteration 1400, loss = 0.631498
I0420 16:49:05.715256 29735 solver.cpp:204] Train net output #0: loss = 0.631498 (* 1 = 0.631498 loss)
I0420 16:49:05.715272 29735 solver.cpp:697] Iteration 1400, lr = 0.01
I0420 16:49:10.185698 29735 solver.cpp:266] Iteration 1500, Testing net (#0)
I0420 16:49:12.192678 29735 solver.cpp:315] Test net output #0: accuracy = 0.577
I0420 16:49:12.192752 29735 solver.cpp:315] Test net output #1: pred = 0.541405
I0420 16:49:12.192769 29735 solver.cpp:315] Test net output #2: pred = 0.458595
I0420 16:49:12.192781 29735 solver.cpp:315] Test net output #3: pred = 0.550804
I0420 16:49:12.192795 29735 solver.cpp:315] Test net output #4: pred = 0.449196
I0420 16:49:12.192808 29735 solver.cpp:315] Test net output #5: pred = 0.556196
I0420 16:49:12.192821 29735 solver.cpp:315] Test net output #6: pred = 0.443804
I0420 16:49:12.192834 29735 solver.cpp:315] Test net output #7: pred = 0.540584
I0420 16:49:12.192847 29735 solver.cpp:315] Test net output #8: pred = 0.459416
I0420 16:49:12.192860 29735 solver.cpp:315] Test net output #9: pred = 0.560318
I0420 16:49:12.192873 29735 solver.cpp:315] Test net output #10: pred = 0.439682
I0420 16:49:12.192886 29735 solver.cpp:315] Test net output #11: pred = 0.549469
I0420 16:49:12.192899 29735 solver.cpp:315] Test net output #12: pred = 0.450531
I0420 16:49:12.192912 29735 solver.cpp:315] Test net output #13: pred = 0.555313
I0420 16:49:12.192926 29735 solver.cpp:315] Test net output #14: pred = 0.444687
I0420 16:49:12.192939 29735 solver.cpp:315] Test net output #15: pred = 0.542689
I0420 16:49:12.192952 29735 solver.cpp:315] Test net output #16: pred = 0.457311
I0420 16:49:12.192965 29735 solver.cpp:315] Test net output #17: pred = 0.565394
I0420 16:49:12.192978 29735 solver.cpp:315] Test net output #18: pred = 0.434606
I0420 16:49:12.192991 29735 solver.cpp:315] Test net output #19: pred = 0.5412
I0420 16:49:12.193022 29735 solver.cpp:315] Test net output #20: pred = 0.4588
I0420 16:49:12.231516 29735 solver.cpp:189] Iteration 1500, loss = 0.653017
I0420 16:49:12.231550 29735 solver.cpp:204] Train net output #0: loss = 0.653017 (* 1 = 0.653017 loss)
I0420 16:49:12.231567 29735 solver.cpp:697] Iteration 1500, lr = 0.01
I0420 16:49:16.535563 29735 solver.cpp:189] Iteration 1600, loss = 0.674005
I0420 16:49:16.535640 29735 solver.cpp:204] Train net output #0: loss = 0.674005 (* 1 = 0.674005 loss)
I0420 16:49:16.535655 29735 solver.cpp:697] Iteration 1600, lr = 0.01
I0420 16:49:20.924551 29735 solver.cpp:189] Iteration 1700, loss = 0.517834
I0420 16:49:20.925324 29735 solver.cpp:204] Train net output #0: loss = 0.517834 (* 1 = 0.517834 loss)
I0420 16:49:20.925345 29735 solver.cpp:697] Iteration 1700, lr = 0.01
I0420 16:49:25.367591 29735 solver.cpp:189] Iteration 1800, loss = 0.495529
I0420 16:49:25.367666 29735 solver.cpp:204] Train net output #0: loss = 0.495529 (* 1 = 0.495529 loss)
I0420 16:49:25.367682 29735 solver.cpp:697] Iteration 1800, lr = 0.01
I0420 16:49:29.637954 29735 solver.cpp:189] Iteration 1900, loss = 0.525992
I0420 16:49:29.638036 29735 solver.cpp:204] Train net output #0: loss = 0.525992 (* 1 = 0.525992 loss)
I0420 16:49:29.638052 29735 solver.cpp:697] Iteration 1900, lr = 0.01
I0420 16:49:33.918493 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_2000.caffemodel
I0420 16:49:33.919069 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_2000.solverstate
I0420 16:49:33.919422 29735 solver.cpp:266] Iteration 2000, Testing net (#0)
I0420 16:49:36.043881 29735 solver.cpp:315] Test net output #0: accuracy = 0.61
I0420 16:49:36.043931 29735 solver.cpp:315] Test net output #1: pred = 0.585156
I0420 16:49:36.043946 29735 solver.cpp:315] Test net output #2: pred = 0.414844
I0420 16:49:36.043978 29735 solver.cpp:315] Test net output #3: pred = 0.591673
I0420 16:49:36.043992 29735 solver.cpp:315] Test net output #4: pred = 0.408327
I0420 16:49:36.044005 29735 solver.cpp:315] Test net output #5: pred = 0.58527
I0420 16:49:36.044018 29735 solver.cpp:315] Test net output #6: pred = 0.41473
I0420 16:49:36.044031 29735 solver.cpp:315] Test net output #7: pred = 0.591608
I0420 16:49:36.044044 29735 solver.cpp:315] Test net output #8: pred = 0.408392
I0420 16:49:36.044057 29735 solver.cpp:315] Test net output #9: pred = 0.596768
I0420 16:49:36.044070 29735 solver.cpp:315] Test net output #10: pred = 0.403232
I0420 16:49:36.044083 29735 solver.cpp:315] Test net output #11: pred = 0.583966
I0420 16:49:36.044096 29735 solver.cpp:315] Test net output #12: pred = 0.416034
I0420 16:49:36.044109 29735 solver.cpp:315] Test net output #13: pred = 0.57839
I0420 16:49:36.044122 29735 solver.cpp:315] Test net output #14: pred = 0.42161
I0420 16:49:36.044136 29735 solver.cpp:315] Test net output #15: pred = 0.575803
I0420 16:49:36.044148 29735 solver.cpp:315] Test net output #16: pred = 0.424197
I0420 16:49:36.044162 29735 solver.cpp:315] Test net output #17: pred = 0.578711
I0420 16:49:36.044174 29735 solver.cpp:315] Test net output #18: pred = 0.421289
I0420 16:49:36.044188 29735 solver.cpp:315] Test net output #19: pred = 0.574626
I0420 16:49:36.044200 29735 solver.cpp:315] Test net output #20: pred = 0.425374
I0420 16:49:36.082851 29735 solver.cpp:189] Iteration 2000, loss = 0.532844
I0420 16:49:36.082887 29735 solver.cpp:204] Train net output #0: loss = 0.532844 (* 1 = 0.532844 loss)
I0420 16:49:36.082903 29735 solver.cpp:697] Iteration 2000, lr = 0.01
I0420 16:49:40.393103 29735 solver.cpp:189] Iteration 2100, loss = 0.644424
I0420 16:49:40.393187 29735 solver.cpp:204] Train net output #0: loss = 0.644424 (* 1 = 0.644424 loss)
I0420 16:49:40.393203 29735 solver.cpp:697] Iteration 2100, lr = 0.01
I0420 16:49:44.717038 29735 solver.cpp:189] Iteration 2200, loss = 0.576916
I0420 16:49:44.717118 29735 solver.cpp:204] Train net output #0: loss = 0.576916 (* 1 = 0.576916 loss)
I0420 16:49:44.717152 29735 solver.cpp:697] Iteration 2200, lr = 0.01
I0420 16:49:49.054168 29735 solver.cpp:189] Iteration 2300, loss = 0.692073
I0420 16:49:49.054249 29735 solver.cpp:204] Train net output #0: loss = 0.692073 (* 1 = 0.692073 loss)
I0420 16:49:49.054265 29735 solver.cpp:697] Iteration 2300, lr = 0.01
I0420 16:49:53.366863 29735 solver.cpp:189] Iteration 2400, loss = 0.0915128
I0420 16:49:53.366945 29735 solver.cpp:204] Train net output #0: loss = 0.0915128 (* 1 = 0.0915128 loss)
I0420 16:49:53.366961 29735 solver.cpp:697] Iteration 2400, lr = 0.01
I0420 16:49:57.214252 29735 solver.cpp:266] Iteration 2500, Testing net (#0)
I0420 16:49:59.174927 29735 solver.cpp:315] Test net output #0: accuracy = 0.428
I0420 16:49:59.174994 29735 solver.cpp:315] Test net output #1: pred = 9.08621e-05
I0420 16:49:59.175010 29735 solver.cpp:315] Test net output #2: pred = 0.999909
I0420 16:49:59.175024 29735 solver.cpp:315] Test net output #3: pred = 0.000107498
I0420 16:49:59.175037 29735 solver.cpp:315] Test net output #4: pred = 0.999892
I0420 16:49:59.175050 29735 solver.cpp:315] Test net output #5: pred = 7.88812e-05
I0420 16:49:59.175065 29735 solver.cpp:315] Test net output #6: pred = 0.999921
I0420 16:49:59.175077 29735 solver.cpp:315] Test net output #7: pred = 8.25942e-05
I0420 16:49:59.175091 29735 solver.cpp:315] Test net output #8: pred = 0.999917
I0420 16:49:59.175103 29735 solver.cpp:315] Test net output #9: pred = 7.08832e-05
I0420 16:49:59.175117 29735 solver.cpp:315] Test net output #10: pred = 0.999929
I0420 16:49:59.175129 29735 solver.cpp:315] Test net output #11: pred = 7.74622e-05
I0420 16:49:59.175143 29735 solver.cpp:315] Test net output #12: pred = 0.999923
I0420 16:49:59.175156 29735 solver.cpp:315] Test net output #13: pred = 8.86715e-05
I0420 16:49:59.175169 29735 solver.cpp:315] Test net output #14: pred = 0.999911
I0420 16:49:59.175182 29735 solver.cpp:315] Test net output #15: pred = 9.81704e-05
I0420 16:49:59.175195 29735 solver.cpp:315] Test net output #16: pred = 0.999902
I0420 16:49:59.175209 29735 solver.cpp:315] Test net output #17: pred = 8.0889e-05
I0420 16:49:59.175222 29735 solver.cpp:315] Test net output #18: pred = 0.999919
I0420 16:49:59.175235 29735 solver.cpp:315] Test net output #19: pred = 8.24118e-05
I0420 16:49:59.175248 29735 solver.cpp:315] Test net output #20: pred = 0.999917
I0420 16:49:59.207571 29735 solver.cpp:189] Iteration 2500, loss = 0.00441335
I0420 16:49:59.207602 29735 solver.cpp:204] Train net output #0: loss = 0.00441332 (* 1 = 0.00441332 loss)
I0420 16:49:59.207618 29735 solver.cpp:697] Iteration 2500, lr = 0.01
I0420 16:50:03.098521 29735 solver.cpp:189] Iteration 2600, loss = 0.624993
I0420 16:50:03.098608 29735 solver.cpp:204] Train net output #0: loss = 0.624994 (* 1 = 0.624994 loss)
I0420 16:50:03.098623 29735 solver.cpp:697] Iteration 2600, lr = 0.01
I0420 16:50:06.961983 29735 solver.cpp:189] Iteration 2700, loss = 0.635225
I0420 16:50:06.962824 29735 solver.cpp:204] Train net output #0: loss = 0.635225 (* 1 = 0.635225 loss)
I0420 16:50:06.962846 29735 solver.cpp:697] Iteration 2700, lr = 0.01
I0420 16:50:10.773032 29735 solver.cpp:189] Iteration 2800, loss = 0.787628
I0420 16:50:10.773087 29735 solver.cpp:204] Train net output #0: loss = 0.787628 (* 1 = 0.787628 loss)
I0420 16:50:10.773102 29735 solver.cpp:697] Iteration 2800, lr = 0.01
I0420 16:50:14.620463 29735 solver.cpp:189] Iteration 2900, loss = 0.854141
I0420 16:50:14.620543 29735 solver.cpp:204] Train net output #0: loss = 0.854141 (* 1 = 0.854141 loss)
I0420 16:50:14.620558 29735 solver.cpp:697] Iteration 2900, lr = 0.01
I0420 16:50:18.318394 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_3000.caffemodel
I0420 16:50:18.318908 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_3000.solverstate
I0420 16:50:18.319242 29735 solver.cpp:266] Iteration 3000, Testing net (#0)
I0420 16:50:20.086493 29735 solver.cpp:315] Test net output #0: accuracy = 0.637
I0420 16:50:20.086542 29735 solver.cpp:315] Test net output #1: pred = 0.558259
I0420 16:50:20.086557 29735 solver.cpp:315] Test net output #2: pred = 0.441742
I0420 16:50:20.086570 29735 solver.cpp:315] Test net output #3: pred = 0.586151
I0420 16:50:20.086582 29735 solver.cpp:315] Test net output #4: pred = 0.413849
I0420 16:50:20.086594 29735 solver.cpp:315] Test net output #5: pred = 0.561162
I0420 16:50:20.086607 29735 solver.cpp:315] Test net output #6: pred = 0.438838
I0420 16:50:20.086619 29735 solver.cpp:315] Test net output #7: pred = 0.583628
I0420 16:50:20.086632 29735 solver.cpp:315] Test net output #8: pred = 0.416372
I0420 16:50:20.086644 29735 solver.cpp:315] Test net output #9: pred = 0.580218
I0420 16:50:20.086658 29735 solver.cpp:315] Test net output #10: pred = 0.419782
I0420 16:50:20.086669 29735 solver.cpp:315] Test net output #11: pred = 0.568803
I0420 16:50:20.086681 29735 solver.cpp:315] Test net output #12: pred = 0.431197
I0420 16:50:20.086694 29735 solver.cpp:315] Test net output #13: pred = 0.568918
I0420 16:50:20.086706 29735 solver.cpp:315] Test net output #14: pred = 0.431082
I0420 16:50:20.086719 29735 solver.cpp:315] Test net output #15: pred = 0.568085
I0420 16:50:20.086731 29735 solver.cpp:315] Test net output #16: pred = 0.431915
I0420 16:50:20.086745 29735 solver.cpp:315] Test net output #17: pred = 0.565299
I0420 16:50:20.086756 29735 solver.cpp:315] Test net output #18: pred = 0.434701
I0420 16:50:20.086769 29735 solver.cpp:315] Test net output #19: pred = 0.568966
I0420 16:50:20.086781 29735 solver.cpp:315] Test net output #20: pred = 0.431034
I0420 16:50:20.119350 29735 solver.cpp:189] Iteration 3000, loss = 0.569
I0420 16:50:20.119382 29735 solver.cpp:204] Train net output #0: loss = 0.569 (* 1 = 0.569 loss)
I0420 16:50:20.119398 29735 solver.cpp:697] Iteration 3000, lr = 0.01
I0420 16:50:23.651332 29735 solver.cpp:189] Iteration 3100, loss = 0.674803
I0420 16:50:23.651437 29735 solver.cpp:204] Train net output #0: loss = 0.674804 (* 1 = 0.674804 loss)
I0420 16:50:23.651461 29735 solver.cpp:697] Iteration 3100, lr = 0.01
I0420 16:50:27.167780 29735 solver.cpp:189] Iteration 3200, loss = 0.671249
I0420 16:50:27.167861 29735 solver.cpp:204] Train net output #0: loss = 0.671249 (* 1 = 0.671249 loss)
I0420 16:50:27.167877 29735 solver.cpp:697] Iteration 3200, lr = 0.01
I0420 16:50:30.769071 29735 solver.cpp:189] Iteration 3300, loss = 0.622869
I0420 16:50:30.769143 29735 solver.cpp:204] Train net output #0: loss = 0.622869 (* 1 = 0.622869 loss)
I0420 16:50:30.769160 29735 solver.cpp:697] Iteration 3300, lr = 0.01
I0420 16:50:34.460253 29735 solver.cpp:189] Iteration 3400, loss = 0.556255
I0420 16:50:34.460341 29735 solver.cpp:204] Train net output #0: loss = 0.556256 (* 1 = 0.556256 loss)
I0420 16:50:34.460360 29735 solver.cpp:697] Iteration 3400, lr = 0.01
I0420 16:50:38.118206 29735 solver.cpp:266] Iteration 3500, Testing net (#0)
I0420 16:50:39.893321 29735 solver.cpp:315] Test net output #0: accuracy = 0.624
I0420 16:50:39.893384 29735 solver.cpp:315] Test net output #1: pred = 0.566495
I0420 16:50:39.893399 29735 solver.cpp:315] Test net output #2: pred = 0.433505
I0420 16:50:39.893412 29735 solver.cpp:315] Test net output #3: pred = 0.575508
I0420 16:50:39.893425 29735 solver.cpp:315] Test net output #4: pred = 0.424492
I0420 16:50:39.893439 29735 solver.cpp:315] Test net output #5: pred = 0.558024
I0420 16:50:39.893451 29735 solver.cpp:315] Test net output #6: pred = 0.441976
I0420 16:50:39.893465 29735 solver.cpp:315] Test net output #7: pred = 0.567997
I0420 16:50:39.893476 29735 solver.cpp:315] Test net output #8: pred = 0.432003
I0420 16:50:39.893489 29735 solver.cpp:315] Test net output #9: pred = 0.575843
I0420 16:50:39.893502 29735 solver.cpp:315] Test net output #10: pred = 0.424157
I0420 16:50:39.893515 29735 solver.cpp:315] Test net output #11: pred = 0.568018
I0420 16:50:39.893543 29735 solver.cpp:315] Test net output #12: pred = 0.431982
I0420 16:50:39.893558 29735 solver.cpp:315] Test net output #13: pred = 0.560558
I0420 16:50:39.893570 29735 solver.cpp:315] Test net output #14: pred = 0.439442
I0420 16:50:39.893587 29735 solver.cpp:315] Test net output #15: pred = 0.59734
I0420 16:50:39.893601 29735 solver.cpp:315] Test net output #16: pred = 0.40266
I0420 16:50:39.893615 29735 solver.cpp:315] Test net output #17: pred = 0.585077
I0420 16:50:39.893627 29735 solver.cpp:315] Test net output #18: pred = 0.414923
I0420 16:50:39.893640 29735 solver.cpp:315] Test net output #19: pred = 0.571654
I0420 16:50:39.893653 29735 solver.cpp:315] Test net output #20: pred = 0.428346
I0420 16:50:39.926024 29735 solver.cpp:189] Iteration 3500, loss = 0.657635
I0420 16:50:39.926056 29735 solver.cpp:204] Train net output #0: loss = 0.657635 (* 1 = 0.657635 loss)
I0420 16:50:39.926074 29735 solver.cpp:697] Iteration 3500, lr = 0.01
I0420 16:50:43.547009 29735 solver.cpp:189] Iteration 3600, loss = 0.164131
I0420 16:50:43.547090 29735 solver.cpp:204] Train net output #0: loss = 0.164132 (* 1 = 0.164132 loss)
I0420 16:50:43.547106 29735 solver.cpp:697] Iteration 3600, lr = 0.01
I0420 16:50:46.842165 29735 solver.cpp:189] Iteration 3700, loss = 0.025433
I0420 16:50:46.842252 29735 solver.cpp:204] Train net output #0: loss = 0.0254333 (* 1 = 0.0254333 loss)
I0420 16:50:46.842268 29735 solver.cpp:697] Iteration 3700, lr = 0.01
I0420 16:50:50.048209 29735 solver.cpp:189] Iteration 3800, loss = 0.645
I0420 16:50:50.048285 29735 solver.cpp:204] Train net output #0: loss = 0.645 (* 1 = 0.645 loss)
I0420 16:50:50.048302 29735 solver.cpp:697] Iteration 3800, lr = 0.01
I0420 16:50:53.455698 29735 solver.cpp:189] Iteration 3900, loss = 0.613926
I0420 16:50:53.455781 29735 solver.cpp:204] Train net output #0: loss = 0.613926 (* 1 = 0.613926 loss)
I0420 16:50:53.455797 29735 solver.cpp:697] Iteration 3900, lr = 0.01
I0420 16:50:56.663151 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_4000.caffemodel
I0420 16:50:56.663637 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_4000.solverstate
I0420 16:50:56.664021 29735 solver.cpp:266] Iteration 4000, Testing net (#0)
I0420 16:50:58.210675 29735 solver.cpp:315] Test net output #0: accuracy = 0.631
I0420 16:50:58.210724 29735 solver.cpp:315] Test net output #1: pred = 0.531904
I0420 16:50:58.210738 29735 solver.cpp:315] Test net output #2: pred = 0.468096
I0420 16:50:58.210752 29735 solver.cpp:315] Test net output #3: pred = 0.54569
I0420 16:50:58.210763 29735 solver.cpp:315] Test net output #4: pred = 0.45431
I0420 16:50:58.210777 29735 solver.cpp:315] Test net output #5: pred = 0.536053
I0420 16:50:58.210789 29735 solver.cpp:315] Test net output #6: pred = 0.463947
I0420 16:50:58.210801 29735 solver.cpp:315] Test net output #7: pred = 0.545469
I0420 16:50:58.210813 29735 solver.cpp:315] Test net output #8: pred = 0.454531
I0420 16:50:58.210826 29735 solver.cpp:315] Test net output #9: pred = 0.556441
I0420 16:50:58.210839 29735 solver.cpp:315] Test net output #10: pred = 0.443559
I0420 16:50:58.210851 29735 solver.cpp:315] Test net output #11: pred = 0.561356
I0420 16:50:58.210863 29735 solver.cpp:315] Test net output #12: pred = 0.438644
I0420 16:50:58.210876 29735 solver.cpp:315] Test net output #13: pred = 0.56959
I0420 16:50:58.210888 29735 solver.cpp:315] Test net output #14: pred = 0.43041
I0420 16:50:58.210901 29735 solver.cpp:315] Test net output #15: pred = 0.540903
I0420 16:50:58.210913 29735 solver.cpp:315] Test net output #16: pred = 0.459097
I0420 16:50:58.210927 29735 solver.cpp:315] Test net output #17: pred = 0.542675
I0420 16:50:58.210938 29735 solver.cpp:315] Test net output #18: pred = 0.457325
I0420 16:50:58.210952 29735 solver.cpp:315] Test net output #19: pred = 0.545148
I0420 16:50:58.210963 29735 solver.cpp:315] Test net output #20: pred = 0.454852
I0420 16:50:58.239229 29735 solver.cpp:189] Iteration 4000, loss = 0.615041
I0420 16:50:58.239261 29735 solver.cpp:204] Train net output #0: loss = 0.615041 (* 1 = 0.615041 loss)
I0420 16:50:58.239277 29735 solver.cpp:697] Iteration 4000, lr = 0.01
I0420 16:51:01.540236 29735 solver.cpp:189] Iteration 4100, loss = 0.637595
I0420 16:51:01.540298 29735 solver.cpp:204] Train net output #0: loss = 0.637595 (* 1 = 0.637595 loss)
I0420 16:51:01.540314 29735 solver.cpp:697] Iteration 4100, lr = 0.01
I0420 16:51:04.700532 29735 solver.cpp:189] Iteration 4200, loss = 0.447858
I0420 16:51:04.700608 29735 solver.cpp:204] Train net output #0: loss = 0.447858 (* 1 = 0.447858 loss)
I0420 16:51:04.700624 29735 solver.cpp:697] Iteration 4200, lr = 0.01
I0420 16:51:07.998533 29735 solver.cpp:189] Iteration 4300, loss = 0.718916
I0420 16:51:07.998611 29735 solver.cpp:204] Train net output #0: loss = 0.718916 (* 1 = 0.718916 loss)
I0420 16:51:07.998626 29735 solver.cpp:697] Iteration 4300, lr = 0.01
I0420 16:51:11.296586 29735 solver.cpp:189] Iteration 4400, loss = 0.602182
I0420 16:51:11.296665 29735 solver.cpp:204] Train net output #0: loss = 0.602182 (* 1 = 0.602182 loss)
I0420 16:51:11.296681 29735 solver.cpp:697] Iteration 4400, lr = 0.01
I0420 16:51:14.617761 29735 solver.cpp:266] Iteration 4500, Testing net (#0)
I0420 16:51:16.213587 29735 solver.cpp:315] Test net output #0: accuracy = 0.625
I0420 16:51:16.214346 29735 solver.cpp:315] Test net output #1: pred = 0.556579
I0420 16:51:16.214366 29735 solver.cpp:315] Test net output #2: pred = 0.443421
I0420 16:51:16.214380 29735 solver.cpp:315] Test net output #3: pred = 0.558402
I0420 16:51:16.214393 29735 solver.cpp:315] Test net output #4: pred = 0.441598
I0420 16:51:16.214406 29735 solver.cpp:315] Test net output #5: pred = 0.568547
I0420 16:51:16.214419 29735 solver.cpp:315] Test net output #6: pred = 0.431453
I0420 16:51:16.214432 29735 solver.cpp:315] Test net output #7: pred = 0.550501
I0420 16:51:16.214447 29735 solver.cpp:315] Test net output #8: pred = 0.449499
I0420 16:51:16.214459 29735 solver.cpp:315] Test net output #9: pred = 0.577977
I0420 16:51:16.214473 29735 solver.cpp:315] Test net output #10: pred = 0.422023
I0420 16:51:16.214485 29735 solver.cpp:315] Test net output #11: pred = 0.57132
I0420 16:51:16.214498 29735 solver.cpp:315] Test net output #12: pred = 0.42868
I0420 16:51:16.214511 29735 solver.cpp:315] Test net output #13: pred = 0.578659
I0420 16:51:16.214524 29735 solver.cpp:315] Test net output #14: pred = 0.421341
I0420 16:51:16.214537 29735 solver.cpp:315] Test net output #15: pred = 0.591837
I0420 16:51:16.214550 29735 solver.cpp:315] Test net output #16: pred = 0.408163
I0420 16:51:16.214563 29735 solver.cpp:315] Test net output #17: pred = 0.564985
I0420 16:51:16.214581 29735 solver.cpp:315] Test net output #18: pred = 0.435015
I0420 16:51:16.214632 29735 solver.cpp:315] Test net output #19: pred = 0.570586
I0420 16:51:16.214649 29735 solver.cpp:315] Test net output #20: pred = 0.429414
I0420 16:51:16.244009 29735 solver.cpp:189] Iteration 4500, loss = 0.675016
I0420 16:51:16.244061 29735 solver.cpp:204] Train net output #0: loss = 0.675016 (* 1 = 0.675016 loss)
I0420 16:51:16.244079 29735 solver.cpp:697] Iteration 4500, lr = 0.01
I0420 16:51:19.603934 29735 solver.cpp:189] Iteration 4600, loss = 0.649181
I0420 16:51:19.604027 29735 solver.cpp:204] Train net output #0: loss = 0.649181 (* 1 = 0.649181 loss)
I0420 16:51:19.604043 29735 solver.cpp:697] Iteration 4600, lr = 0.01
I0420 16:51:22.986728 29735 solver.cpp:189] Iteration 4700, loss = 0.684809
I0420 16:51:22.986809 29735 solver.cpp:204] Train net output #0: loss = 0.684809 (* 1 = 0.684809 loss)
I0420 16:51:22.986824 29735 solver.cpp:697] Iteration 4700, lr = 0.01
I0420 16:51:26.346411 29735 solver.cpp:189] Iteration 4800, loss = 0.653093
I0420 16:51:26.346490 29735 solver.cpp:204] Train net output #0: loss = 0.653093 (* 1 = 0.653093 loss)
I0420 16:51:26.346528 29735 solver.cpp:697] Iteration 4800, lr = 0.01
I0420 16:51:29.673588 29735 solver.cpp:189] Iteration 4900, loss = 0.0870395
I0420 16:51:29.673666 29735 solver.cpp:204] Train net output #0: loss = 0.0870395 (* 1 = 0.0870395 loss)
I0420 16:51:29.673681 29735 solver.cpp:697] Iteration 4900, lr = 0.01
I0420 16:51:32.849895 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_5000.caffemodel
I0420 16:51:32.850430 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_5000.solverstate
I0420 16:51:32.850756 29735 solver.cpp:266] Iteration 5000, Testing net (#0)
I0420 16:51:34.468585 29735 solver.cpp:315] Test net output #0: accuracy = 0.434
I0420 16:51:34.468658 29735 solver.cpp:315] Test net output #1: pred = 0.00749312
I0420 16:51:34.468674 29735 solver.cpp:315] Test net output #2: pred = 0.992507
I0420 16:51:34.468688 29735 solver.cpp:315] Test net output #3: pred = 0.00788112
I0420 16:51:34.468699 29735 solver.cpp:315] Test net output #4: pred = 0.992119
I0420 16:51:34.468713 29735 solver.cpp:315] Test net output #5: pred = 0.00782909
I0420 16:51:34.468724 29735 solver.cpp:315] Test net output #6: pred = 0.992171
I0420 16:51:34.468737 29735 solver.cpp:315] Test net output #7: pred = 0.00807234
I0420 16:51:34.468750 29735 solver.cpp:315] Test net output #8: pred = 0.991928
I0420 16:51:34.468762 29735 solver.cpp:315] Test net output #9: pred = 0.00808398
I0420 16:51:34.468775 29735 solver.cpp:315] Test net output #10: pred = 0.991916
I0420 16:51:34.468787 29735 solver.cpp:315] Test net output #11: pred = 0.00751587
I0420 16:51:34.468801 29735 solver.cpp:315] Test net output #12: pred = 0.992484
I0420 16:51:34.468812 29735 solver.cpp:315] Test net output #13: pred = 0.00756163
I0420 16:51:34.468825 29735 solver.cpp:315] Test net output #14: pred = 0.992439
I0420 16:51:34.468837 29735 solver.cpp:315] Test net output #15: pred = 0.00781831
I0420 16:51:34.468850 29735 solver.cpp:315] Test net output #16: pred = 0.992182
I0420 16:51:34.468863 29735 solver.cpp:315] Test net output #17: pred = 0.00795638
I0420 16:51:34.468875 29735 solver.cpp:315] Test net output #18: pred = 0.992044
I0420 16:51:34.468888 29735 solver.cpp:315] Test net output #19: pred = 0.00781076
I0420 16:51:34.468900 29735 solver.cpp:315] Test net output #20: pred = 0.992189
I0420 16:51:34.496229 29735 solver.cpp:189] Iteration 5000, loss = 0.0864078
I0420 16:51:34.496260 29735 solver.cpp:204] Train net output #0: loss = 0.0864077 (* 1 = 0.0864077 loss)
I0420 16:51:34.496278 29735 solver.cpp:697] Iteration 5000, lr = 0.01
I0420 16:51:37.668391 29735 solver.cpp:189] Iteration 5100, loss = 0.59533
I0420 16:51:37.668455 29735 solver.cpp:204] Train net output #0: loss = 0.59533 (* 1 = 0.59533 loss)
I0420 16:51:37.668472 29735 solver.cpp:697] Iteration 5100, lr = 0.01
I0420 16:51:40.608533 29735 solver.cpp:189] Iteration 5200, loss = 0.639798
I0420 16:51:40.608613 29735 solver.cpp:204] Train net output #0: loss = 0.639798 (* 1 = 0.639798 loss)
I0420 16:51:40.608628 29735 solver.cpp:697] Iteration 5200, lr = 0.01
I0420 16:51:43.576477 29735 solver.cpp:189] Iteration 5300, loss = 0.566049
I0420 16:51:43.576555 29735 solver.cpp:204] Train net output #0: loss = 0.566049 (* 1 = 0.566049 loss)
I0420 16:51:43.576570 29735 solver.cpp:697] Iteration 5300, lr = 0.01
I0420 16:51:46.611352 29735 solver.cpp:189] Iteration 5400, loss = 0.574825
I0420 16:51:46.611428 29735 solver.cpp:204] Train net output #0: loss = 0.574825 (* 1 = 0.574825 loss)
I0420 16:51:46.611444 29735 solver.cpp:697] Iteration 5400, lr = 0.01
I0420 16:51:49.630537 29735 solver.cpp:266] Iteration 5500, Testing net (#0)
I0420 16:51:51.080809 29735 solver.cpp:315] Test net output #0: accuracy = 0.631
I0420 16:51:51.080880 29735 solver.cpp:315] Test net output #1: pred = 0.587586
I0420 16:51:51.080895 29735 solver.cpp:315] Test net output #2: pred = 0.412414
I0420 16:51:51.080909 29735 solver.cpp:315] Test net output #3: pred = 0.554927
I0420 16:51:51.080940 29735 solver.cpp:315] Test net output #4: pred = 0.445072
I0420 16:51:51.080953 29735 solver.cpp:315] Test net output #5: pred = 0.548697
I0420 16:51:51.080965 29735 solver.cpp:315] Test net output #6: pred = 0.451303
I0420 16:51:51.080978 29735 solver.cpp:315] Test net output #7: pred = 0.534551
I0420 16:51:51.080991 29735 solver.cpp:315] Test net output #8: pred = 0.465449
I0420 16:51:51.081003 29735 solver.cpp:315] Test net output #9: pred = 0.544528
I0420 16:51:51.081015 29735 solver.cpp:315] Test net output #10: pred = 0.455472
I0420 16:51:51.081028 29735 solver.cpp:315] Test net output #11: pred = 0.546775
I0420 16:51:51.081040 29735 solver.cpp:315] Test net output #12: pred = 0.453225
I0420 16:51:51.081053 29735 solver.cpp:315] Test net output #13: pred = 0.560509
I0420 16:51:51.081065 29735 solver.cpp:315] Test net output #14: pred = 0.439491
I0420 16:51:51.081078 29735 solver.cpp:315] Test net output #15: pred = 0.542123
I0420 16:51:51.081089 29735 solver.cpp:315] Test net output #16: pred = 0.457877
I0420 16:51:51.081102 29735 solver.cpp:315] Test net output #17: pred = 0.559547
I0420 16:51:51.081115 29735 solver.cpp:315] Test net output #18: pred = 0.440453
I0420 16:51:51.081126 29735 solver.cpp:315] Test net output #19: pred = 0.563369
I0420 16:51:51.081140 29735 solver.cpp:315] Test net output #20: pred = 0.436631
I0420 16:51:51.107537 29735 solver.cpp:189] Iteration 5500, loss = 0.49012
I0420 16:51:51.107568 29735 solver.cpp:204] Train net output #0: loss = 0.49012 (* 1 = 0.49012 loss)
I0420 16:51:51.107584 29735 solver.cpp:697] Iteration 5500, lr = 0.01
I0420 16:51:54.227391 29735 solver.cpp:189] Iteration 5600, loss = 0.638121
I0420 16:51:54.227473 29735 solver.cpp:204] Train net output #0: loss = 0.638121 (* 1 = 0.638121 loss)
I0420 16:51:54.227489 29735 solver.cpp:697] Iteration 5600, lr = 0.01
I0420 16:51:57.362210 29735 solver.cpp:189] Iteration 5700, loss = 0.678685
I0420 16:51:57.362272 29735 solver.cpp:204] Train net output #0: loss = 0.678685 (* 1 = 0.678685 loss)
I0420 16:51:57.362288 29735 solver.cpp:697] Iteration 5700, lr = 0.01
I0420 16:52:00.527508 29735 solver.cpp:189] Iteration 5800, loss = 0.863971
I0420 16:52:00.527611 29735 solver.cpp:204] Train net output #0: loss = 0.863971 (* 1 = 0.863971 loss)
I0420 16:52:00.527628 29735 solver.cpp:697] Iteration 5800, lr = 0.01
I0420 16:52:03.683406 29735 solver.cpp:189] Iteration 5900, loss = 0.629981
I0420 16:52:03.683485 29735 solver.cpp:204] Train net output #0: loss = 0.629981 (* 1 = 0.629981 loss)
I0420 16:52:03.683500 29735 solver.cpp:697] Iteration 5900, lr = 0.01
I0420 16:52:06.826619 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_6000.caffemodel
I0420 16:52:06.827201 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_6000.solverstate
I0420 16:52:06.827569 29735 solver.cpp:266] Iteration 6000, Testing net (#0)
I0420 16:52:08.354238 29735 solver.cpp:315] Test net output #0: accuracy = 0.623
I0420 16:52:08.354310 29735 solver.cpp:315] Test net output #1: pred = 0.546661
I0420 16:52:08.354324 29735 solver.cpp:315] Test net output #2: pred = 0.453339
I0420 16:52:08.354337 29735 solver.cpp:315] Test net output #3: pred = 0.563613
I0420 16:52:08.354351 29735 solver.cpp:315] Test net output #4: pred = 0.436387
I0420 16:52:08.354362 29735 solver.cpp:315] Test net output #5: pred = 0.578799
I0420 16:52:08.354375 29735 solver.cpp:315] Test net output #6: pred = 0.421201
I0420 16:52:08.354387 29735 solver.cpp:315] Test net output #7: pred = 0.561936
I0420 16:52:08.354400 29735 solver.cpp:315] Test net output #8: pred = 0.438064
I0420 16:52:08.354413 29735 solver.cpp:315] Test net output #9: pred = 0.548454
I0420 16:52:08.354425 29735 solver.cpp:315] Test net output #10: pred = 0.451546
I0420 16:52:08.354437 29735 solver.cpp:315] Test net output #11: pred = 0.554804
I0420 16:52:08.354450 29735 solver.cpp:315] Test net output #12: pred = 0.445196
I0420 16:52:08.354480 29735 solver.cpp:315] Test net output #13: pred = 0.549084
I0420 16:52:08.354493 29735 solver.cpp:315] Test net output #14: pred = 0.450916
I0420 16:52:08.354506 29735 solver.cpp:315] Test net output #15: pred = 0.589027
I0420 16:52:08.354518 29735 solver.cpp:315] Test net output #16: pred = 0.410973
I0420 16:52:08.354531 29735 solver.cpp:315] Test net output #17: pred = 0.581123
I0420 16:52:08.354542 29735 solver.cpp:315] Test net output #18: pred = 0.418877
I0420 16:52:08.354555 29735 solver.cpp:315] Test net output #19: pred = 0.584222
I0420 16:52:08.354568 29735 solver.cpp:315] Test net output #20: pred = 0.415778
I0420 16:52:08.381863 29735 solver.cpp:189] Iteration 6000, loss = 0.4679
I0420 16:52:08.381893 29735 solver.cpp:204] Train net output #0: loss = 0.4679 (* 1 = 0.4679 loss)
I0420 16:52:08.381908 29735 solver.cpp:697] Iteration 6000, lr = 0.01
I0420 16:52:11.500934 29735 solver.cpp:189] Iteration 6100, loss = 0.239037
I0420 16:52:11.501013 29735 solver.cpp:204] Train net output #0: loss = 0.239037 (* 1 = 0.239037 loss)
I0420 16:52:11.501027 29735 solver.cpp:697] Iteration 6100, lr = 0.01
I0420 16:52:14.561591 29735 solver.cpp:189] Iteration 6200, loss = 0.0940268
I0420 16:52:14.561664 29735 solver.cpp:204] Train net output #0: loss = 0.0940268 (* 1 = 0.0940268 loss)
I0420 16:52:14.561678 29735 solver.cpp:697] Iteration 6200, lr = 0.01
I0420 16:52:17.587687 29735 solver.cpp:189] Iteration 6300, loss = 0.636399
I0420 16:52:17.587761 29735 solver.cpp:204] Train net output #0: loss = 0.636399 (* 1 = 0.636399 loss)
I0420 16:52:17.587775 29735 solver.cpp:697] Iteration 6300, lr = 0.01
I0420 16:52:20.718385 29735 solver.cpp:189] Iteration 6400, loss = 0.538837
I0420 16:52:20.718456 29735 solver.cpp:204] Train net output #0: loss = 0.538838 (* 1 = 0.538838 loss)
I0420 16:52:20.718469 29735 solver.cpp:697] Iteration 6400, lr = 0.01
I0420 16:52:23.829777 29735 solver.cpp:266] Iteration 6500, Testing net (#0)
I0420 16:52:25.327441 29735 solver.cpp:315] Test net output #0: accuracy = 0.633
I0420 16:52:25.327510 29735 solver.cpp:315] Test net output #1: pred = 0.565185
I0420 16:52:25.327524 29735 solver.cpp:315] Test net output #2: pred = 0.434815
I0420 16:52:25.327536 29735 solver.cpp:315] Test net output #3: pred = 0.524638
I0420 16:52:25.327548 29735 solver.cpp:315] Test net output #4: pred = 0.475362
I0420 16:52:25.327561 29735 solver.cpp:315] Test net output #5: pred = 0.515345
I0420 16:52:25.327574 29735 solver.cpp:315] Test net output #6: pred = 0.484655
I0420 16:52:25.327585 29735 solver.cpp:315] Test net output #7: pred = 0.49982
I0420 16:52:25.327597 29735 solver.cpp:315] Test net output #8: pred = 0.50018
I0420 16:52:25.327610 29735 solver.cpp:315] Test net output #9: pred = 0.533689
I0420 16:52:25.327621 29735 solver.cpp:315] Test net output #10: pred = 0.466311
I0420 16:52:25.327633 29735 solver.cpp:315] Test net output #11: pred = 0.527846
I0420 16:52:25.327646 29735 solver.cpp:315] Test net output #12: pred = 0.472154
I0420 16:52:25.327657 29735 solver.cpp:315] Test net output #13: pred = 0.530037
I0420 16:52:25.327671 29735 solver.cpp:315] Test net output #14: pred = 0.469963
I0420 16:52:25.327682 29735 solver.cpp:315] Test net output #15: pred = 0.549077
I0420 16:52:25.327694 29735 solver.cpp:315] Test net output #16: pred = 0.450923
I0420 16:52:25.327707 29735 solver.cpp:315] Test net output #17: pred = 0.529757
I0420 16:52:25.327718 29735 solver.cpp:315] Test net output #18: pred = 0.470243
I0420 16:52:25.327730 29735 solver.cpp:315] Test net output #19: pred = 0.534022
I0420 16:52:25.327743 29735 solver.cpp:315] Test net output #20: pred = 0.465978
I0420 16:52:25.354718 29735 solver.cpp:189] Iteration 6500, loss = 0.602011
I0420 16:52:25.354748 29735 solver.cpp:204] Train net output #0: loss = 0.602011 (* 1 = 0.602011 loss)
I0420 16:52:25.354763 29735 solver.cpp:697] Iteration 6500, lr = 0.01
I0420 16:52:28.471243 29735 solver.cpp:189] Iteration 6600, loss = 0.620924
I0420 16:52:28.471334 29735 solver.cpp:204] Train net output #0: loss = 0.620924 (* 1 = 0.620924 loss)
I0420 16:52:28.471350 29735 solver.cpp:697] Iteration 6600, lr = 0.01
I0420 16:52:31.602118 29735 solver.cpp:189] Iteration 6700, loss = 0.710113
I0420 16:52:31.602195 29735 solver.cpp:204] Train net output #0: loss = 0.710113 (* 1 = 0.710113 loss)
I0420 16:52:31.602210 29735 solver.cpp:697] Iteration 6700, lr = 0.01
I0420 16:52:34.769767 29735 solver.cpp:189] Iteration 6800, loss = 0.873442
I0420 16:52:34.769837 29735 solver.cpp:204] Train net output #0: loss = 0.873442 (* 1 = 0.873442 loss)
I0420 16:52:34.769852 29735 solver.cpp:697] Iteration 6800, lr = 0.01
I0420 16:52:37.915143 29735 solver.cpp:189] Iteration 6900, loss = 0.738456
I0420 16:52:37.915222 29735 solver.cpp:204] Train net output #0: loss = 0.738456 (* 1 = 0.738456 loss)
I0420 16:52:37.915237 29735 solver.cpp:697] Iteration 6900, lr = 0.01
I0420 16:52:41.054168 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_7000.caffemodel
I0420 16:52:41.054739 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_7000.solverstate
I0420 16:52:41.055119 29735 solver.cpp:266] Iteration 7000, Testing net (#0)
I0420 16:52:42.559633 29735 solver.cpp:315] Test net output #0: accuracy = 0.625
I0420 16:52:42.559700 29735 solver.cpp:315] Test net output #1: pred = 0.584086
I0420 16:52:42.559715 29735 solver.cpp:315] Test net output #2: pred = 0.415915
I0420 16:52:42.559725 29735 solver.cpp:315] Test net output #3: pred = 0.590376
I0420 16:52:42.559737 29735 solver.cpp:315] Test net output #4: pred = 0.409624
I0420 16:52:42.559749 29735 solver.cpp:315] Test net output #5: pred = 0.568325
I0420 16:52:42.559762 29735 solver.cpp:315] Test net output #6: pred = 0.431675
I0420 16:52:42.559773 29735 solver.cpp:315] Test net output #7: pred = 0.598231
I0420 16:52:42.559785 29735 solver.cpp:315] Test net output #8: pred = 0.401769
I0420 16:52:42.559798 29735 solver.cpp:315] Test net output #9: pred = 0.592587
I0420 16:52:42.559808 29735 solver.cpp:315] Test net output #10: pred = 0.407413
I0420 16:52:42.559820 29735 solver.cpp:315] Test net output #11: pred = 0.586369
I0420 16:52:42.559833 29735 solver.cpp:315] Test net output #12: pred = 0.413632
I0420 16:52:42.559844 29735 solver.cpp:315] Test net output #13: pred = 0.557232
I0420 16:52:42.559856 29735 solver.cpp:315] Test net output #14: pred = 0.442768
I0420 16:52:42.559867 29735 solver.cpp:315] Test net output #15: pred = 0.587993
I0420 16:52:42.559880 29735 solver.cpp:315] Test net output #16: pred = 0.412007
I0420 16:52:42.559891 29735 solver.cpp:315] Test net output #17: pred = 0.563374
I0420 16:52:42.559902 29735 solver.cpp:315] Test net output #18: pred = 0.436626
I0420 16:52:42.559914 29735 solver.cpp:315] Test net output #19: pred = 0.573507
I0420 16:52:42.559926 29735 solver.cpp:315] Test net output #20: pred = 0.426493
I0420 16:52:42.587519 29735 solver.cpp:189] Iteration 7000, loss = 0.530851
I0420 16:52:42.587548 29735 solver.cpp:204] Train net output #0: loss = 0.530851 (* 1 = 0.530851 loss)
I0420 16:52:42.587563 29735 solver.cpp:697] Iteration 7000, lr = 0.01
I0420 16:52:45.779650 29735 solver.cpp:189] Iteration 7100, loss = 0.735672
I0420 16:52:45.779723 29735 solver.cpp:204] Train net output #0: loss = 0.735673 (* 1 = 0.735673 loss)
I0420 16:52:45.779737 29735 solver.cpp:697] Iteration 7100, lr = 0.01
I0420 16:52:48.943138 29735 solver.cpp:189] Iteration 7200, loss = 0.54106
I0420 16:52:48.943214 29735 solver.cpp:204] Train net output #0: loss = 0.54106 (* 1 = 0.54106 loss)
I0420 16:52:48.943228 29735 solver.cpp:697] Iteration 7200, lr = 0.01
I0420 16:52:52.080189 29735 solver.cpp:189] Iteration 7300, loss = 0.561791
I0420 16:52:52.080271 29735 solver.cpp:204] Train net output #0: loss = 0.561791 (* 1 = 0.561791 loss)
I0420 16:52:52.080286 29735 solver.cpp:697] Iteration 7300, lr = 0.01
I0420 16:52:55.214890 29735 solver.cpp:189] Iteration 7400, loss = 0.232745
I0420 16:52:55.214962 29735 solver.cpp:204] Train net output #0: loss = 0.232745 (* 1 = 0.232745 loss)
I0420 16:52:55.214977 29735 solver.cpp:697] Iteration 7400, lr = 0.01
I0420 16:52:58.194490 29735 solver.cpp:266] Iteration 7500, Testing net (#0)
I0420 16:52:59.703066 29735 solver.cpp:315] Test net output #0: accuracy = 0.427
I0420 16:52:59.703140 29735 solver.cpp:315] Test net output #1: pred = 0.01433
I0420 16:52:59.703155 29735 solver.cpp:315] Test net output #2: pred = 0.98567
I0420 16:52:59.703168 29735 solver.cpp:315] Test net output #3: pred = 0.0140455
I0420 16:52:59.703181 29735 solver.cpp:315] Test net output #4: pred = 0.985954
I0420 16:52:59.703192 29735 solver.cpp:315] Test net output #5: pred = 0.0171522
I0420 16:52:59.703204 29735 solver.cpp:315] Test net output #6: pred = 0.982848
I0420 16:52:59.703217 29735 solver.cpp:315] Test net output #7: pred = 0.0157002
I0420 16:52:59.703229 29735 solver.cpp:315] Test net output #8: pred = 0.9843
I0420 16:52:59.703241 29735 solver.cpp:315] Test net output #9: pred = 0.0165164
I0420 16:52:59.703253 29735 solver.cpp:315] Test net output #10: pred = 0.983483
I0420 16:52:59.703265 29735 solver.cpp:315] Test net output #11: pred = 0.0167645
I0420 16:52:59.703277 29735 solver.cpp:315] Test net output #12: pred = 0.983236
I0420 16:52:59.703289 29735 solver.cpp:315] Test net output #13: pred = 0.0150227
I0420 16:52:59.703301 29735 solver.cpp:315] Test net output #14: pred = 0.984978
I0420 16:52:59.703313 29735 solver.cpp:315] Test net output #15: pred = 0.0137793
I0420 16:52:59.703326 29735 solver.cpp:315] Test net output #16: pred = 0.986221
I0420 16:52:59.703338 29735 solver.cpp:315] Test net output #17: pred = 0.0142521
I0420 16:52:59.703351 29735 solver.cpp:315] Test net output #18: pred = 0.985748
I0420 16:52:59.703362 29735 solver.cpp:315] Test net output #19: pred = 0.0131479
I0420 16:52:59.703374 29735 solver.cpp:315] Test net output #20: pred = 0.986852
I0420 16:52:59.728981 29735 solver.cpp:189] Iteration 7500, loss = 0.0172948
I0420 16:52:59.729010 29735 solver.cpp:204] Train net output #0: loss = 0.0172949 (* 1 = 0.0172949 loss)
I0420 16:52:59.729025 29735 solver.cpp:697] Iteration 7500, lr = 0.01
I0420 16:53:02.824973 29735 solver.cpp:189] Iteration 7600, loss = 0.684581
I0420 16:53:02.825052 29735 solver.cpp:204] Train net output #0: loss = 0.684581 (* 1 = 0.684581 loss)
I0420 16:53:02.825067 29735 solver.cpp:697] Iteration 7600, lr = 0.01
I0420 16:53:05.969949 29735 solver.cpp:189] Iteration 7700, loss = 0.708668
I0420 16:53:05.970021 29735 solver.cpp:204] Train net output #0: loss = 0.708668 (* 1 = 0.708668 loss)
I0420 16:53:05.970036 29735 solver.cpp:697] Iteration 7700, lr = 0.01
I0420 16:53:09.093901 29735 solver.cpp:189] Iteration 7800, loss = 0.462488
I0420 16:53:09.093953 29735 solver.cpp:204] Train net output #0: loss = 0.462488 (* 1 = 0.462488 loss)
I0420 16:53:09.093967 29735 solver.cpp:697] Iteration 7800, lr = 0.01
I0420 16:53:12.257654 29735 solver.cpp:189] Iteration 7900, loss = 0.587084
I0420 16:53:12.257724 29735 solver.cpp:204] Train net output #0: loss = 0.587084 (* 1 = 0.587084 loss)
I0420 16:53:12.257737 29735 solver.cpp:697] Iteration 7900, lr = 0.01
I0420 16:53:15.394191 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_8000.caffemodel
I0420 16:53:15.394760 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_8000.solverstate
I0420 16:53:15.395112 29735 solver.cpp:266] Iteration 8000, Testing net (#0)
I0420 16:53:16.825994 29735 solver.cpp:315] Test net output #0: accuracy = 0.622
I0420 16:53:16.826059 29735 solver.cpp:315] Test net output #1: pred = 0.533806
I0420 16:53:16.826073 29735 solver.cpp:315] Test net output #2: pred = 0.466194
I0420 16:53:16.826086 29735 solver.cpp:315] Test net output #3: pred = 0.52606
I0420 16:53:16.826098 29735 solver.cpp:315] Test net output #4: pred = 0.47394
I0420 16:53:16.826128 29735 solver.cpp:315] Test net output #5: pred = 0.53748
I0420 16:53:16.826139 29735 solver.cpp:315] Test net output #6: pred = 0.46252
I0420 16:53:16.826151 29735 solver.cpp:315] Test net output #7: pred = 0.543132
I0420 16:53:16.826164 29735 solver.cpp:315] Test net output #8: pred = 0.456868
I0420 16:53:16.826175 29735 solver.cpp:315] Test net output #9: pred = 0.528611
I0420 16:53:16.826187 29735 solver.cpp:315] Test net output #10: pred = 0.471389
I0420 16:53:16.826200 29735 solver.cpp:315] Test net output #11: pred = 0.529968
I0420 16:53:16.826210 29735 solver.cpp:315] Test net output #12: pred = 0.470032
I0420 16:53:16.826222 29735 solver.cpp:315] Test net output #13: pred = 0.518312
I0420 16:53:16.826234 29735 solver.cpp:315] Test net output #14: pred = 0.481688
I0420 16:53:16.826246 29735 solver.cpp:315] Test net output #15: pred = 0.508745
I0420 16:53:16.826258 29735 solver.cpp:315] Test net output #16: pred = 0.491255
I0420 16:53:16.826269 29735 solver.cpp:315] Test net output #17: pred = 0.517502
I0420 16:53:16.826282 29735 solver.cpp:315] Test net output #18: pred = 0.482498
I0420 16:53:16.826293 29735 solver.cpp:315] Test net output #19: pred = 0.502742
I0420 16:53:16.826304 29735 solver.cpp:315] Test net output #20: pred = 0.497258
I0420 16:53:16.853145 29735 solver.cpp:189] Iteration 8000, loss = 0.469552
I0420 16:53:16.853174 29735 solver.cpp:204] Train net output #0: loss = 0.469552 (* 1 = 0.469552 loss)
I0420 16:53:16.853189 29735 solver.cpp:697] Iteration 8000, lr = 0.01
I0420 16:53:20.002100 29735 solver.cpp:189] Iteration 8100, loss = 0.569537
I0420 16:53:20.002158 29735 solver.cpp:204] Train net output #0: loss = 0.569537 (* 1 = 0.569537 loss)
I0420 16:53:20.002172 29735 solver.cpp:697] Iteration 8100, lr = 0.01
I0420 16:53:23.204653 29735 solver.cpp:189] Iteration 8200, loss = 0.722134
I0420 16:53:23.204728 29735 solver.cpp:204] Train net output #0: loss = 0.722134 (* 1 = 0.722134 loss)
I0420 16:53:23.204742 29735 solver.cpp:697] Iteration 8200, lr = 0.01
I0420 16:53:26.440418 29735 solver.cpp:189] Iteration 8300, loss = 0.64612
I0420 16:53:26.440492 29735 solver.cpp:204] Train net output #0: loss = 0.64612 (* 1 = 0.64612 loss)
I0420 16:53:26.440507 29735 solver.cpp:697] Iteration 8300, lr = 0.01
I0420 16:53:29.640463 29735 solver.cpp:189] Iteration 8400, loss = 0.610873
I0420 16:53:29.640537 29735 solver.cpp:204] Train net output #0: loss = 0.610873 (* 1 = 0.610873 loss)
I0420 16:53:29.640552 29735 solver.cpp:697] Iteration 8400, lr = 0.01
I0420 16:53:32.808923 29735 solver.cpp:266] Iteration 8500, Testing net (#0)
I0420 16:53:34.301087 29735 solver.cpp:315] Test net output #0: accuracy = 0.638
I0420 16:53:34.301954 29735 solver.cpp:315] Test net output #1: pred = 0.568852
I0420 16:53:34.301988 29735 solver.cpp:315] Test net output #2: pred = 0.431148
I0420 16:53:34.302001 29735 solver.cpp:315] Test net output #3: pred = 0.541355
I0420 16:53:34.302013 29735 solver.cpp:315] Test net output #4: pred = 0.458645
I0420 16:53:34.302024 29735 solver.cpp:315] Test net output #5: pred = 0.537378
I0420 16:53:34.302037 29735 solver.cpp:315] Test net output #6: pred = 0.462622
I0420 16:53:34.302047 29735 solver.cpp:315] Test net output #7: pred = 0.552855
I0420 16:53:34.302059 29735 solver.cpp:315] Test net output #8: pred = 0.447145
I0420 16:53:34.302072 29735 solver.cpp:315] Test net output #9: pred = 0.537805
I0420 16:53:34.302083 29735 solver.cpp:315] Test net output #10: pred = 0.462195
I0420 16:53:34.302094 29735 solver.cpp:315] Test net output #11: pred = 0.526746
I0420 16:53:34.302105 29735 solver.cpp:315] Test net output #12: pred = 0.473254
I0420 16:53:34.302117 29735 solver.cpp:315] Test net output #13: pred = 0.520469
I0420 16:53:34.302129 29735 solver.cpp:315] Test net output #14: pred = 0.479531
I0420 16:53:34.302140 29735 solver.cpp:315] Test net output #15: pred = 0.54949
I0420 16:53:34.302152 29735 solver.cpp:315] Test net output #16: pred = 0.450509
I0420 16:53:34.302177 29735 solver.cpp:315] Test net output #17: pred = 0.536042
I0420 16:53:34.302189 29735 solver.cpp:315] Test net output #18: pred = 0.463958
I0420 16:53:34.302201 29735 solver.cpp:315] Test net output #19: pred = 0.53234
I0420 16:53:34.302213 29735 solver.cpp:315] Test net output #20: pred = 0.46766
I0420 16:53:34.329424 29735 solver.cpp:189] Iteration 8500, loss = 0.558107
I0420 16:53:34.329452 29735 solver.cpp:204] Train net output #0: loss = 0.558107 (* 1 = 0.558107 loss)
I0420 16:53:34.329465 29735 solver.cpp:697] Iteration 8500, lr = 0.01
I0420 16:53:37.478050 29735 solver.cpp:189] Iteration 8600, loss = 0.398375
I0420 16:53:37.478122 29735 solver.cpp:204] Train net output #0: loss = 0.398375 (* 1 = 0.398375 loss)
I0420 16:53:37.478137 29735 solver.cpp:697] Iteration 8600, lr = 0.01
I0420 16:53:40.582540 29735 solver.cpp:189] Iteration 8700, loss = 0.0635792
I0420 16:53:40.582605 29735 solver.cpp:204] Train net output #0: loss = 0.0635792 (* 1 = 0.0635792 loss)
I0420 16:53:40.582619 29735 solver.cpp:697] Iteration 8700, lr = 0.01
I0420 16:53:43.588667 29735 solver.cpp:189] Iteration 8800, loss = 0.752678
I0420 16:53:43.588737 29735 solver.cpp:204] Train net output #0: loss = 0.752678 (* 1 = 0.752678 loss)
I0420 16:53:43.588752 29735 solver.cpp:697] Iteration 8800, lr = 0.01
I0420 16:53:46.428026 29735 solver.cpp:189] Iteration 8900, loss = 0.526534
I0420 16:53:46.428102 29735 solver.cpp:204] Train net output #0: loss = 0.526534 (* 1 = 0.526534 loss)
I0420 16:53:46.428117 29735 solver.cpp:697] Iteration 8900, lr = 0.01
I0420 16:53:49.222363 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_9000.caffemodel
I0420 16:53:49.222887 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_9000.solverstate
I0420 16:53:49.223229 29735 solver.cpp:266] Iteration 9000, Testing net (#0)
I0420 16:53:50.543879 29735 solver.cpp:315] Test net output #0: accuracy = 0.632
I0420 16:53:50.543946 29735 solver.cpp:315] Test net output #1: pred = 0.557883
I0420 16:53:50.543975 29735 solver.cpp:315] Test net output #2: pred = 0.442117
I0420 16:53:50.543987 29735 solver.cpp:315] Test net output #3: pred = 0.564399
I0420 16:53:50.543999 29735 solver.cpp:315] Test net output #4: pred = 0.435601
I0420 16:53:50.544010 29735 solver.cpp:315] Test net output #5: pred = 0.570416
I0420 16:53:50.544023 29735 solver.cpp:315] Test net output #6: pred = 0.429584
I0420 16:53:50.544034 29735 solver.cpp:315] Test net output #7: pred = 0.562906
I0420 16:53:50.544045 29735 solver.cpp:315] Test net output #8: pred = 0.437094
I0420 16:53:50.544057 29735 solver.cpp:315] Test net output #9: pred = 0.57696
I0420 16:53:50.544070 29735 solver.cpp:315] Test net output #10: pred = 0.42304
I0420 16:53:50.544080 29735 solver.cpp:315] Test net output #11: pred = 0.565567
I0420 16:53:50.544092 29735 solver.cpp:315] Test net output #12: pred = 0.434433
I0420 16:53:50.544103 29735 solver.cpp:315] Test net output #13: pred = 0.590795
I0420 16:53:50.544116 29735 solver.cpp:315] Test net output #14: pred = 0.409205
I0420 16:53:50.544131 29735 solver.cpp:315] Test net output #15: pred = 0.562879
I0420 16:53:50.544144 29735 solver.cpp:315] Test net output #16: pred = 0.437121
I0420 16:53:50.544157 29735 solver.cpp:315] Test net output #17: pred = 0.556494
I0420 16:53:50.544167 29735 solver.cpp:315] Test net output #18: pred = 0.443506
I0420 16:53:50.544179 29735 solver.cpp:315] Test net output #19: pred = 0.55567
I0420 16:53:50.544191 29735 solver.cpp:315] Test net output #20: pred = 0.44433
I0420 16:53:50.568188 29735 solver.cpp:189] Iteration 9000, loss = 0.668086
I0420 16:53:50.568217 29735 solver.cpp:204] Train net output #0: loss = 0.668086 (* 1 = 0.668086 loss)
I0420 16:53:50.568231 29735 solver.cpp:697] Iteration 9000, lr = 0.01
I0420 16:53:53.402562 29735 solver.cpp:189] Iteration 9100, loss = 0.475608
I0420 16:53:53.402636 29735 solver.cpp:204] Train net output #0: loss = 0.475608 (* 1 = 0.475608 loss)
I0420 16:53:53.402667 29735 solver.cpp:697] Iteration 9100, lr = 0.01
I0420 16:53:56.250541 29735 solver.cpp:189] Iteration 9200, loss = 0.645685
I0420 16:53:56.250613 29735 solver.cpp:204] Train net output #0: loss = 0.645685 (* 1 = 0.645685 loss)
I0420 16:53:56.250627 29735 solver.cpp:697] Iteration 9200, lr = 0.01
I0420 16:53:59.102303 29735 solver.cpp:189] Iteration 9300, loss = 0.509828
I0420 16:53:59.102377 29735 solver.cpp:204] Train net output #0: loss = 0.509828 (* 1 = 0.509828 loss)
I0420 16:53:59.102392 29735 solver.cpp:697] Iteration 9300, lr = 0.01
I0420 16:54:01.984561 29735 solver.cpp:189] Iteration 9400, loss = 0.509585
I0420 16:54:01.984640 29735 solver.cpp:204] Train net output #0: loss = 0.509585 (* 1 = 0.509585 loss)
I0420 16:54:01.984655 29735 solver.cpp:697] Iteration 9400, lr = 0.01
I0420 16:54:04.761355 29735 solver.cpp:266] Iteration 9500, Testing net (#0)
I0420 16:54:06.082834 29735 solver.cpp:315] Test net output #0: accuracy = 0.633
I0420 16:54:06.082901 29735 solver.cpp:315] Test net output #1: pred = 0.526764
I0420 16:54:06.082913 29735 solver.cpp:315] Test net output #2: pred = 0.473236
I0420 16:54:06.082926 29735 solver.cpp:315] Test net output #3: pred = 0.527068
I0420 16:54:06.082937 29735 solver.cpp:315] Test net output #4: pred = 0.472932
I0420 16:54:06.082949 29735 solver.cpp:315] Test net output #5: pred = 0.515123
I0420 16:54:06.082962 29735 solver.cpp:315] Test net output #6: pred = 0.484877
I0420 16:54:06.082973 29735 solver.cpp:315] Test net output #7: pred = 0.554335
I0420 16:54:06.082984 29735 solver.cpp:315] Test net output #8: pred = 0.445665
I0420 16:54:06.082996 29735 solver.cpp:315] Test net output #9: pred = 0.542611
I0420 16:54:06.083009 29735 solver.cpp:315] Test net output #10: pred = 0.457389
I0420 16:54:06.083020 29735 solver.cpp:315] Test net output #11: pred = 0.548582
I0420 16:54:06.083031 29735 solver.cpp:315] Test net output #12: pred = 0.451418
I0420 16:54:06.083044 29735 solver.cpp:315] Test net output #13: pred = 0.520983
I0420 16:54:06.083055 29735 solver.cpp:315] Test net output #14: pred = 0.479017
I0420 16:54:06.083066 29735 solver.cpp:315] Test net output #15: pred = 0.526284
I0420 16:54:06.083078 29735 solver.cpp:315] Test net output #16: pred = 0.473716
I0420 16:54:06.083091 29735 solver.cpp:315] Test net output #17: pred = 0.533361
I0420 16:54:06.083101 29735 solver.cpp:315] Test net output #18: pred = 0.466639
I0420 16:54:06.083113 29735 solver.cpp:315] Test net output #19: pred = 0.512475
I0420 16:54:06.083125 29735 solver.cpp:315] Test net output #20: pred = 0.487525
I0420 16:54:06.107233 29735 solver.cpp:189] Iteration 9500, loss = 0.628252
I0420 16:54:06.107260 29735 solver.cpp:204] Train net output #0: loss = 0.628252 (* 1 = 0.628252 loss)
I0420 16:54:06.107275 29735 solver.cpp:697] Iteration 9500, lr = 0.01
I0420 16:54:08.940244 29735 solver.cpp:189] Iteration 9600, loss = 0.429263
I0420 16:54:08.940315 29735 solver.cpp:204] Train net output #0: loss = 0.429263 (* 1 = 0.429263 loss)
I0420 16:54:08.940328 29735 solver.cpp:697] Iteration 9600, lr = 0.01
I0420 16:54:11.783327 29735 solver.cpp:189] Iteration 9700, loss = 0.898165
I0420 16:54:11.783393 29735 solver.cpp:204] Train net output #0: loss = 0.898165 (* 1 = 0.898165 loss)
I0420 16:54:11.783407 29735 solver.cpp:697] Iteration 9700, lr = 0.01
I0420 16:54:14.659075 29735 solver.cpp:189] Iteration 9800, loss = 0.641561
I0420 16:54:14.659127 29735 solver.cpp:204] Train net output #0: loss = 0.641561 (* 1 = 0.641561 loss)
I0420 16:54:14.659147 29735 solver.cpp:697] Iteration 9800, lr = 0.01
I0420 16:54:17.512887 29735 solver.cpp:189] Iteration 9900, loss = 0.084535
I0420 16:54:17.512958 29735 solver.cpp:204] Train net output #0: loss = 0.084535 (* 1 = 0.084535 loss)
I0420 16:54:17.512972 29735 solver.cpp:697] Iteration 9900, lr = 0.01
I0420 16:54:20.235441 29735 solver.cpp:334] Snapshotting to hdf5_classification/data/train_iter_10000.caffemodel
I0420 16:54:20.235995 29735 solver.cpp:342] Snapshotting solver state to hdf5_classification/data/train_iter_10000.solverstate
I0420 16:54:20.249811 29735 solver.cpp:248] Iteration 10000, loss = 0.0234883
I0420 16:54:20.249840 29735 solver.cpp:266] Iteration 10000, Testing net (#0)
I0420 16:54:21.603222 29735 solver.cpp:315] Test net output #0: accuracy = 0.43
I0420 16:54:21.603286 29735 solver.cpp:315] Test net output #1: pred = 0.0185596
I0420 16:54:21.603301 29735 solver.cpp:315] Test net output #2: pred = 0.98144
I0420 16:54:21.603312 29735 solver.cpp:315] Test net output #3: pred = 0.0167466
I0420 16:54:21.603323 29735 solver.cpp:315] Test net output #4: pred = 0.983253
I0420 16:54:21.603334 29735 solver.cpp:315] Test net output #5: pred = 0.0175813
I0420 16:54:21.603345 29735 solver.cpp:315] Test net output #6: pred = 0.982419
I0420 16:54:21.603356 29735 solver.cpp:315] Test net output #7: pred = 0.0177738
I0420 16:54:21.603368 29735 solver.cpp:315] Test net output #8: pred = 0.982226
I0420 16:54:21.603379 29735 solver.cpp:315] Test net output #9: pred = 0.0182237
I0420 16:54:21.603390 29735 solver.cpp:315] Test net output #10: pred = 0.981776
I0420 16:54:21.603401 29735 solver.cpp:315] Test net output #11: pred = 0.0222336
I0420 16:54:21.603412 29735 solver.cpp:315] Test net output #12: pred = 0.977767
I0420 16:54:21.603423 29735 solver.cpp:315] Test net output #13: pred = 0.0208131
I0420 16:54:21.603435 29735 solver.cpp:315] Test net output #14: pred = 0.979187
I0420 16:54:21.603446 29735 solver.cpp:315] Test net output #15: pred = 0.0188855
I0420 16:54:21.603456 29735 solver.cpp:315] Test net output #16: pred = 0.981115
I0420 16:54:21.603468 29735 solver.cpp:315] Test net output #17: pred = 0.0207035
I0420 16:54:21.603479 29735 solver.cpp:315] Test net output #18: pred = 0.979296
I0420 16:54:21.603490 29735 solver.cpp:315] Test net output #19: pred = 0.0181618
I0420 16:54:21.603502 29735 solver.cpp:315] Test net output #20: pred = 0.981838
I0420 16:54:21.603513 29735 solver.cpp:253] Optimization Done.
I0420 16:54:21.603523 29735 caffe.cpp:134] Optimization Done.
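In case it helps with reproducing the curves, the loss and accuracy drops are easy to pull out of the log itself with a short script. Below is a minimal sketch (not the exact script used for the plots): it assumes the solver output above was redirected to a file, here called train.log, and that matplotlib is installed; the regexes simply match the "Iteration N, loss = ..." and "Test net output #0: accuracy = ..." lines shown above.

import re
import matplotlib.pyplot as plt

LOG_PATH = "train.log"  # hypothetical path: wherever the caffe output above was saved

# Patterns matching the glog lines above, e.g.
#   "... Iteration 4900, loss = 0.0870395"
#   "... Test net output #0: accuracy = 0.434"
loss_re = re.compile(r"Iteration (\d+), loss = ([0-9.eE+-]+)")
test_iter_re = re.compile(r"Iteration (\d+), Testing net")
acc_re = re.compile(r"Test net output #0: accuracy = ([0-9.eE+-]+)")

train_iters, train_loss = [], []
test_iters, test_acc = [], []
pending_test_iter = None

with open(LOG_PATH) as f:
    for line in f:
        m = loss_re.search(line)
        if m:
            # Train loss reported every `display` (100) iterations
            train_iters.append(int(m.group(1)))
            train_loss.append(float(m.group(2)))
            continue
        m = test_iter_re.search(line)
        if m:
            # Remember which iteration the upcoming test outputs belong to
            pending_test_iter = int(m.group(1))
            continue
        m = acc_re.search(line)
        if m and pending_test_iter is not None:
            test_iters.append(pending_test_iter)
            test_acc.append(float(m.group(1)))
            pending_test_iter = None

# Plot train loss and test accuracy on a shared iteration axis
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(train_iters, train_loss)
ax1.set_ylabel("train loss")
ax2.plot(test_iters, test_acc)
ax2.set_ylabel("test accuracy")
ax2.set_xlabel("iteration")
plt.show()

Running this over the log above makes the mismatch visible: the train loss dips to ~0.087 around iteration 4900-5000 while the test accuracy at iteration 5000 falls to 0.434, and the same pattern repeats around iterations 7500 and 10000.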