
I want to train AlexNet to tell circles (label: "1") from rectangles (label: "0") in black-and-white images. I am using 1,800 training images (900 circles and 900 rectangles). The net does not train: the loss does not change during training.

My train_val.prototxt looks like this:

name: "AlexNet" 
layer { 
    name: "data" 
    type: "Data" 
    top: "data" 
    top: "label" 
    include { 
     phase: TRAIN 
    } 
    data_param { 
     source: "newlmdb" 
     batch_size: 100 
     backend: LMDB 
    } 
} 
layer { 
    name: "data" 
    type: "Data" 
    top: "data" 
    top: "label" 
    include { 
     phase: TEST 
    } 
    data_param { 
     source: "newvallmdb" 
     batch_size: 50 
     backend: LMDB 
    } 
} 
layer { 
    name: "conv1" 
    type: "Convolution" 
    bottom: "data" 
    top: "conv1" 
    param { 
     lr_mult: 1 
     decay_mult: 1 
    } 
    param { 
     lr_mult: 2 
     decay_mult: 0 
    } 
    convolution_param { 
     num_output: 96 
     kernel_size: 11 
     stride: 4 
     weight_filler { 
     type: "gaussian" 
     std: 0.01 
     } 
     bias_filler { 
     type: "constant" 
     value: 0 
     } 
    } 
} 
layer { 
    name: "relu1" 
    type: "ReLU" 
    bottom: "conv1" 
    top: "conv1" 
} 
layer { 
    name: "norm1" 
    type: "LRN" 
    bottom: "conv1" 
    top: "norm1" 
    lrn_param { 
     local_size: 5 
     alpha: 0.0001 
     beta: 0.75 
    } 
} 
layer { 
    name: "pool1" 
    type: "Pooling" 
    bottom: "norm1" 
    top: "pool1" 
    pooling_param { 
     pool: MAX 
     kernel_size: 3 
     stride: 2 
    } 
} 
layer { 
    name: "conv2" 
    type: "Convolution" 
    bottom: "pool1" 
    top: "conv2" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    num_output: 256 
    pad: 2 
    kernel_size: 5 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layer { 
    name: "relu2" 
    type: "ReLU" 
    bottom: "conv2" 
    top: "conv2" 
} 
layer { 
    name: "norm2" 
    type: "LRN" 
    bottom: "conv2" 
    top: "norm2" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layer { 
    name: "pool2" 
    type: "Pooling" 
    bottom: "norm2" 
    top: "pool2" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layer { 
    name: "conv3" 
    type: "Convolution" 
    bottom: "pool2" 
    top: "conv3" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layer { 
    name: "relu3" 
    type: "ReLU" 
    bottom: "conv3" 
    top: "conv3" 
} 
layer { 
    name: "conv4" 
    type: "Convolution" 
    bottom: "conv3" 
    top: "conv4" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layer { 
    name: "relu4" 
    type: "ReLU" 
    bottom: "conv4" 
    top: "conv4" 
} 
layer { 
    name: "conv5" 
    type: "Convolution" 
    bottom: "conv4" 
    top: "conv5" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    convolution_param { 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 

layer { 
    name: "relu5" 
    type: "ReLU" 
    bottom: "conv5" 
    top: "conv5" 
} 
layer { 
    name: "pool5" 
    type: "Pooling" 
    bottom: "conv5" 
    top: "pool5" 
    pooling_param { 
     pool: MAX 
     kernel_size: 3 
     stride: 2 
    } 
} 
layer { 
    name: "fc6" 
    type: "InnerProduct" 
    bottom: "pool5" 
    top: "fc6" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layer { 
    name: "relu6" 
    type: "ReLU" 
    bottom: "fc6" 
    top: "fc6" 
} 
layer { 
    name: "drop6" 
    type: "Dropout" 
    bottom: "fc6" 
    top: "fc6" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layer { 
    name: "fc7" 
    type: "InnerProduct" 
    bottom: "fc6" 
    top: "fc7" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layer { 
    name: "relu7" 
    type: "ReLU" 
    bottom: "fc7" 
    top: "fc7" 
} 
layer { 
    name: "drop7" 
    type: "Dropout" 
    bottom: "fc7" 
    top: "fc7" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layer { 
    name: "fc8" 
    type: "InnerProduct" 
    bottom: "fc7" 
    top: "fc8" 
    param { 
    lr_mult: 1 
    decay_mult: 1 
    } 
    param { 
    lr_mult: 2 
    decay_mult: 0 
    } 
    inner_product_param { 
    num_output: 1 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layer { 
    name: "accuracy" 
    type: "Accuracy" 
    bottom: "fc8" 
    bottom: "label" 
    top: "accuracy" 
    include { 
    phase: TEST 
    } 
} 
layer { 
    name: "loss" 
    type: "SoftmaxWithLoss" 
    bottom: "fc8" 
    bottom: "label" 
    top: "loss" 
} 

My solver.prototxt looks like this:

net: "train_val.prototxt" 
test_iter: 200 
test_interval: 200 
base_lr: 0.01 
lr_policy: "step" 
gamma: 0.1 
stepsize: 50 
display: 20 
max_iter: 500 
momentum: 0.9 
weight_decay: 0.0005 
snapshot: 100 
snapshot_prefix: "training" 
solver_mode: GPU 
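
Note how the step policy interacts with these settings: with gamma: 0.1 and stepsize: 50, the learning rate is divided by 10 every 50 iterations, so by iteration 480 it has decayed to 1e-11, as the log below confirms. A quick way to see the schedule (a sketch, not from the original post):

# lr(iter) = base_lr * gamma ** floor(iter / stepsize) 
for it in (0, 60, 100, 480): 
    print('iter %d -> lr %g' % (it, 0.01 * 0.1 ** (it // 50))) 
# iter 0 -> lr 0.01, iter 60 -> lr 0.001, iter 100 -> lr 0.0001, iter 480 -> lr 1e-11 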

While training I get output like this:

I1018 10:13:04.936286 7404 solver.cpp:330] Iteration 0, Testing net (#0) 
I1018 10:13:06.262091 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:07.556700 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:11.440527 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:12.267205 7404 solver.cpp:397]  Test net output #0: accuracy = 0.94 
I1018 10:13:12.267205 7404 solver.cpp:397]  Test net output #1: loss = 0.104804 (* 1 = 0.104804 loss) 
I1018 10:13:12.594758 7404 solver.cpp:218] Iteration 0 (-9.63533e-42 iter/s, 7.69215s/20 iters), loss = 0.873365 
I1018 10:13:12.594758 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:12.594758 7404 sgd_solver.cpp:105] Iteration 0, lr = 0.01 
I1018 10:13:15.807883 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:17.305263 7404 solver.cpp:218] Iteration 20 (4.25024 iter/s, 4.70562s/20 iters), loss = 0.873365 
I1018 10:13:17.305263 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:17.305263 7404 sgd_solver.cpp:105] Iteration 20, lr = 0.01 
I1018 10:13:20.019263 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:21.984572 7404 solver.cpp:218] Iteration 40 (4.26967 iter/s, 4.6842s/20 iters), loss = 0.873365 
I1018 10:13:21.984572 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:21.984572 7404 sgd_solver.cpp:105] Iteration 40, lr = 0.01 
I1018 10:13:24.246239 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:26.695078 7404 solver.cpp:218] Iteration 60 (4.25863 iter/s, 4.69634s/20 iters), loss = 0.873365 
I1018 10:13:26.695078 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:26.695078 7404 sgd_solver.cpp:105] Iteration 60, lr = 0.001 
I1018 10:13:28.426422 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:31.421181 7404 solver.cpp:218] Iteration 80 (4.22339 iter/s, 4.73554s/20 iters), loss = 0.873365 
I1018 10:13:31.421181 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:31.421181 7404 sgd_solver.cpp:105] Iteration 80, lr = 0.001 
I1018 10:13:32.731387 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:35.788537 7404 solver.cpp:447] Snapshotting to binary proto file training_iter_100.caffemodel 
I1018 10:13:37.317111 7404 sgd_solver.cpp:273] Snapshotting solver state to binary proto file training_iter_100.solverstate 
I1018 10:13:38.081399 7404 solver.cpp:218] Iteration 100 (3.00631 iter/s, 6.65267s/20 iters), loss = 0 
I1018 10:13:38.081399 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:13:38.081399 7404 sgd_solver.cpp:105] Iteration 100, lr = 0.0001 
I1018 10:13:38.908077 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:42.791904 7404 solver.cpp:218] Iteration 120 (4.23481 iter/s, 4.72276s/20 iters), loss = 0 
I1018 10:13:42.807502 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:13:42.807502 7404 sgd_solver.cpp:105] Iteration 120, lr = 0.0001 
I1018 10:13:43.088260 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:47.393225 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:47.549202 7404 solver.cpp:218] Iteration 140 (4.21716 iter/s, 4.74253s/20 iters), loss = 0 
I1018 10:13:47.549202 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:13:47.549202 7404 sgd_solver.cpp:105] Iteration 140, lr = 0.0001 
I1018 10:13:51.635800 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:52.290904 7404 solver.cpp:218] Iteration 160 (4.21268 iter/s, 4.74757s/20 iters), loss = 0 
I1018 10:13:52.290904 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:13:52.290904 7404 sgd_solver.cpp:105] Iteration 160, lr = 1e-05 
I1018 10:13:56.003156 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:13:57.048202 7404 solver.cpp:218] Iteration 180 (4.20926 iter/s, 4.75142s/20 iters), loss = 0.873365 
I1018 10:13:57.048202 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:13:57.048202 7404 sgd_solver.cpp:105] Iteration 180, lr = 1e-05 
I1018 10:14:00.214535 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:01.431155 7404 solver.cpp:447] Snapshotting to binary proto file training_iter_200.caffemodel 
I1018 10:14:03.053316 7404 sgd_solver.cpp:273] Snapshotting solver state to binary proto file training_iter_200.solverstate 
I1018 10:14:03.552443 7404 solver.cpp:330] Iteration 200, Testing net (#0) 
I1018 10:14:04.082764 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:05.439764 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:10.727385 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:10.789775 7404 blocking_queue.cpp:49] Waiting for data 
I1018 10:14:10.961350 7404 solver.cpp:397]  Test net output #0: accuracy = 0.94 
I1018 10:14:10.961350 7404 solver.cpp:397]  Test net output #1: loss = 0.104804 (* 1 = 0.104804 loss) 
I1018 10:14:11.179718 7404 solver.cpp:218] Iteration 200 (1.41459 iter/s, 14.1384s/20 iters), loss = 0.873365 
I1018 10:14:11.179718 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:11.179718 7404 sgd_solver.cpp:105] Iteration 200, lr = 1e-06 
I1018 10:14:13.846925 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:15.952615 7404 solver.cpp:218] Iteration 220 (4.19673 iter/s, 4.76562s/20 iters), loss = 0.873365 
I1018 10:14:15.952615 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:15.952615 7404 sgd_solver.cpp:105] Iteration 220, lr = 1e-06 
I1018 10:14:18.198683 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:20.709913 7404 solver.cpp:218] Iteration 240 (4.19817 iter/s, 4.76398s/20 iters), loss = 0.873365 
I1018 10:14:20.709913 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:20.709913 7404 sgd_solver.cpp:105] Iteration 240, lr = 1e-06 
I1018 10:14:22.441257 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:25.498407 7404 solver.cpp:218] Iteration 260 (4.18243 iter/s, 4.78191s/20 iters), loss = 0.873365 
I1018 10:14:25.498407 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:25.498407 7404 sgd_solver.cpp:105] Iteration 260, lr = 1e-07 
I1018 10:14:26.761821 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:30.271303 7404 solver.cpp:218] Iteration 280 (4.18629 iter/s, 4.7775s/20 iters), loss = 0 
I1018 10:14:30.271303 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:14:30.271303 7404 sgd_solver.cpp:105] Iteration 280, lr = 1e-07 
I1018 10:14:31.129176 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:34.701050 7404 solver.cpp:447] Snapshotting to binary proto file training_iter_300.caffemodel 
I1018 10:14:36.136039 7404 sgd_solver.cpp:273] Snapshotting solver state to binary proto file training_iter_300.solverstate 
I1018 10:14:36.931521 7404 solver.cpp:218] Iteration 300 (3.00228 iter/s, 6.66161s/20 iters), loss = 0 
I1018 10:14:36.931521 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:14:36.931521 7404 sgd_solver.cpp:105] Iteration 300, lr = 1e-08 
I1018 10:14:37.337061 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:41.595233 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:41.688819 7404 solver.cpp:218] Iteration 320 (4.20513 iter/s, 4.7561s/20 iters), loss = 0 
I1018 10:14:41.688819 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:14:41.688819 7404 sgd_solver.cpp:105] Iteration 320, lr = 1e-08 
I1018 10:14:45.884600 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:46.461715 7404 solver.cpp:218] Iteration 340 (4.19496 iter/s, 4.76763s/20 iters), loss = 0 
I1018 10:14:46.461715 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:14:46.461715 7404 sgd_solver.cpp:105] Iteration 340, lr = 1e-08 
I1018 10:14:50.111598 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:51.234639 7404 solver.cpp:218] Iteration 360 (4.1858 iter/s, 4.77806s/20 iters), loss = 0.873365 
I1018 10:14:51.234639 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:51.234639 7404 sgd_solver.cpp:105] Iteration 360, lr = 1e-09 
I1018 10:14:54.478982 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:14:56.007566 7404 solver.cpp:218] Iteration 380 (4.19437 iter/s, 4.76829s/20 iters), loss = 0.873365 
I1018 10:14:56.007566 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:14:56.007566 7404 sgd_solver.cpp:105] Iteration 380, lr = 1e-09 
I1018 10:14:58.705986 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:00.421743 7404 solver.cpp:447] Snapshotting to binary proto file training_iter_400.caffemodel 
I1018 10:15:01.903534 7404 sgd_solver.cpp:273] Snapshotting solver state to binary proto file training_iter_400.solverstate 
I1018 10:15:02.371469 7404 solver.cpp:330] Iteration 400, Testing net (#0) 
I1018 10:15:03.478912 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:04.820323 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:06.146136 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:07.471949 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:08.813360 7792 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:09.796021 7404 solver.cpp:397]  Test net output #0: accuracy = 0.95 
I1018 10:15:09.796021 7404 solver.cpp:397]  Test net output #1: loss = 0.0873365 (* 1 = 0.0873365 loss) 
I1018 10:15:10.014390 7404 solver.cpp:218] Iteration 400 (1.4278 iter/s, 14.0076s/20 iters), loss = 0.873365 
I1018 10:15:10.014390 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:15:10.014390 7404 sgd_solver.cpp:105] Iteration 400, lr = 1e-10 
I1018 10:15:12.291669 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:14.787317 7404 solver.cpp:218] Iteration 420 (4.18883 iter/s, 4.7746s/20 iters), loss = 0.873365 
I1018 10:15:14.787317 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:15:14.787317 7404 sgd_solver.cpp:105] Iteration 420, lr = 1e-10 
I1018 10:15:16.582064 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:19.545646 7404 solver.cpp:218] Iteration 440 (4.20273 iter/s, 4.75881s/20 iters), loss = 0.873365 
I1018 10:15:19.545646 7404 solver.cpp:237]  Train net output #0: loss = 0.873365 (* 1 = 0.873365 loss) 
I1018 10:15:19.545646 7404 sgd_solver.cpp:105] Iteration 440, lr = 1e-10 
I1018 10:15:20.824666 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:24.334172 7404 solver.cpp:218] Iteration 460 (4.18022 iter/s, 4.78443s/20 iters), loss = 0 
I1018 10:15:24.334172 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:15:24.334172 7404 sgd_solver.cpp:105] Iteration 460, lr = 1e-11 
I1018 10:15:25.114061 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:29.107098 7404 solver.cpp:218] Iteration 480 (4.18678 iter/s, 4.77694s/20 iters), loss = 0 
I1018 10:15:29.107098 7404 solver.cpp:237]  Train net output #0: loss = 0 (* 1 = 0 loss) 
I1018 10:15:29.107098 7404 sgd_solver.cpp:105] Iteration 480, lr = 1e-11 
I1018 10:15:29.497043 7748 data_layer.cpp:73] Restarting data prefetching from start. 
I1018 10:15:33.505677 7404 solver.cpp:447] Snapshotting to binary proto file training_iter_500.caffemodel 
I1018 10:15:35.112251 7404 sgd_solver.cpp:273] Snapshotting solver state to binary proto file training_iter_500.solverstate 
I1018 10:15:35.751760 7404 solver.cpp:310] Iteration 500, loss = 0 
I1018 10:15:35.751760 7404 solver.cpp:315] Optimization Done. 

As you can see, the loss is constant at 0.873365 or 0, and I don't know why. And when I test images with the code below, I always get zero as the output:

import numpy as np 
import caffe 

img = caffe.io.load_image('val/img911.png', color=False)  # float values in [0, 1] 
grayimg = img[:,:,0] 
gi = np.reshape(grayimg, (260,260,1)) 

net = caffe.Net('deploy.prototxt', 
       'training_iter_500.caffemodel', 
       caffe.TEST) 

transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) 
transformer.set_transpose('data', (2,0,1)) 
transformer.set_raw_scale('data', 255.0)  # rescale [0, 1] back to [0, 255] 

net.blobs['data'].reshape(1,1,260,260) 
net.blobs['data'].data[...] = transformer.preprocess('data', gi) 

out = net.forward() 

print out['prob'].argmax() 

To create the LMDB files I used this script:

import numpy as np 
import lmdb 
import caffe 
import cv2 

N = 1800 

X = np.zeros((N, 1, 260, 260), dtype=np.uint8) 
y = np.zeros(N, dtype=np.int64) 
map_size = X.nbytes * 10 

file = open("train.txt", "r") 
files = file.readlines() 
print(len(files)) 

for i in range(0, len(files)): 
    line = files[i] 
    img_path = line.split()[0] 
    label = line.split()[1] 
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) 
    X[i] = img 
    y[i] = int(label)  # store the label alongside its image 

env = lmdb.open('newlmdb', map_size=map_size) 

with env.begin(write=True) as txn: 
    # txn is a Transaction object 
    for i in range(N): 
        datum = caffe.proto.caffe_pb2.Datum() 
        datum.channels = X.shape[1] 
        datum.height = X.shape[2] 
        datum.width = X.shape[3] 
        datum.data = X[i].tobytes()  # or .tostring() if numpy < 1.9 
        datum.label = int(y[i]) 
        txn.put('{:08d}'.format(i).encode('ascii'), datum.SerializeToString()) 
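
A quick sanity check for the resulting LMDB, assuming the '{:08d}' key convention used above, is to read an entry back and confirm that shape and label survived the round trip:

import lmdb 
import caffe 

# Fetch record 0 and decode it. 
env = lmdb.open('newlmdb', readonly=True) 
with env.begin() as txn: 
    raw = txn.get('{:08d}'.format(0).encode('ascii')) 
datum = caffe.proto.caffe_pb2.Datum() 
datum.ParseFromString(raw) 
img = caffe.io.datum_to_array(datum) 
print(img.shape)    # expect (1, 260, 260) 
print(datum.label)  # expect 0 or 1 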

Is this an error in my code, or did I choose bad network parameters?

EDIT

I modified the data layer to get zero-mean input:

layer { 
    name: "data" 
    type: "Data" 
    top: "data" 
    top: "label" 
    include { 
    phase: TRAIN 
    } 
    transform_param { 
    mirror: true 
    crop_size: 260 
    mean_file: "formen_mean.binaryproto" 
    } 
    data_param { 
    source: "newlmdb" 
    batch_size: 10 
    backend: LMDB 
    } 
} 
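
The mean file referenced above can be generated with Caffe's compute_image_mean tool, or directly in Python; a minimal sketch (not from the original post), assuming the newlmdb written earlier:

import numpy as np 
import lmdb 
import caffe 

# Accumulate a per-pixel mean over every record in the training LMDB 
# and save it in the binaryproto format that mean_file expects. 
env = lmdb.open('newlmdb', readonly=True) 
mean = np.zeros((1, 1, 260, 260), dtype=np.float64) 
count = 0 
with env.begin() as txn: 
    for _, raw in txn.cursor(): 
        datum = caffe.proto.caffe_pb2.Datum() 
        datum.ParseFromString(raw) 
        mean[0] += caffe.io.datum_to_array(datum)  # (1, 260, 260) 
        count += 1 
mean /= count 
with open('formen_mean.binaryproto', 'wb') as f: 
    f.write(caffe.io.array_to_blobproto(mean).SerializeToString()) 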

I also increased the number of training images to 10,000 and the test images to 1,000, shuffled my data, and edited my solver.prototxt:

net: "train_val.prototxt" 
test_iter: 20 
test_interval: 50 
base_lr: 0.01 
lr_policy: "step" 
gamma: 0.1 
stepsize: 50 
display: 20 
max_iter: 1000 
momentum: 0.9 
weight_decay: 0.0005 
snapshot: 200 
debug_info: true 
snapshot_prefix: "training" 
solver_mode: GPU 

At some point in the debug info the following happened:

I1018 14:21:16.238169 5540 net.cpp:619]  [Backward] Layer drop6, bottom blob fc6 diff: 2.64904e-05 
I1018 14:21:16.238169 5540 net.cpp:619]  [Backward] Layer relu6, bottom blob fc6 diff: 1.33896e-05 
I1018 14:21:16.269316 5540 net.cpp:619]  [Backward] Layer fc6, bottom blob pool2 diff: 8.48778e-06 
I1018 14:21:16.269316 5540 net.cpp:630]  [Backward] Layer fc6, param blob 0 diff: 0.000181272 
I1018 14:21:16.269316 5540 net.cpp:630]  [Backward] Layer fc6, param blob 1 diff: 0.000133896 
I1018 14:21:16.269316 5540 net.cpp:619]  [Backward] Layer pool2, bottom blob norm2 diff: 1.82455e-06 
I1018 14:21:16.269316 5540 net.cpp:619]  [Backward] Layer norm2, bottom blob conv2 diff: 1.82354e-06 
I1018 14:21:16.269316 5540 net.cpp:619]  [Backward] Layer relu2, bottom blob conv2 diff: 1.41858e-06 
I1018 14:21:16.284889 5540 net.cpp:619]  [Backward] Layer conv2, bottom blob pool1 diff: 1.989e-06 
I1018 14:21:16.284889 5540 net.cpp:630]  [Backward] Layer conv2, param blob 0 diff: 0.00600851 
I1018 14:21:16.284889 5540 net.cpp:630]  [Backward] Layer conv2, param blob 1 diff: 0.00107259 
I1018 14:21:16.284889 5540 net.cpp:619]  [Backward] Layer pool1, bottom blob norm1 diff: 4.57322e-07 
I1018 14:21:16.284889 5540 net.cpp:619]  [Backward] Layer norm1, bottom blob conv1 diff: 4.54691e-07 
I1018 14:21:16.284889 5540 net.cpp:619]  [Backward] Layer relu1, bottom blob conv1 diff: 2.18649e-07 
I1018 14:21:16.284889 5540 net.cpp:630]  [Backward] Layer conv1, param blob 0 diff: 0.0333731 
I1018 14:21:16.284889 5540 net.cpp:630]  [Backward] Layer conv1, param blob 1 diff: 0.000384605 
E1018 14:21:16.331610 5540 net.cpp:719]  [Backward] All net params (data, diff): L1 norm = (1.0116e+06, 55724.3); L2 norm = (80.218, 24.0218) 
I1018 14:21:16.331610 5540 solver.cpp:218] Iteration 0 (0 iter/s, 1.69776s/20 iters), loss = 8.73365 
I1018 14:21:16.331610 5540 solver.cpp:237]  Train net output #0: loss = 8.73365 (* 1 = 8.73365 loss) 
I1018 14:21:16.331610 5540 sgd_solver.cpp:105] Iteration 0, lr = 0.01 
I1018 14:21:19.726611 5540 net.cpp:591]  [Forward] Layer data, top blob data data: 44.8563 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer data, top blob label data: 1 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer conv1, top blob conv1 data: nan 
I1018 14:21:19.742184 5540 net.cpp:603]  [Forward] Layer conv1, param blob 0 data: nan 
I1018 14:21:19.742184 5540 net.cpp:603]  [Forward] Layer conv1, param blob 1 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer relu1, top blob conv1 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer norm1, top blob norm1 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer pool1, top blob pool1 data: inf 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer conv2, top blob conv2 data: nan 
I1018 14:21:19.742184 5540 net.cpp:603]  [Forward] Layer conv2, param blob 0 data: nan 
I1018 14:21:19.742184 5540 net.cpp:603]  [Forward] Layer conv2, param blob 1 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer relu2, top blob conv2 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer norm2, top blob norm2 data: nan 
I1018 14:21:19.742184 5540 net.cpp:591]  [Forward] Layer pool2, top blob pool2 data: inf 

So I reduced base_lr to 0.0001. But at a later point the gradients dropped to zero:

I1018 14:24:40.919765 5500 net.cpp:591]  [Forward] Layer loss, top blob loss data: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer loss, bottom blob fc8 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer fc8, bottom blob fc7 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:630]  [Backward] Layer fc8, param blob 0 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:630]  [Backward] Layer fc8, param blob 1 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer drop7, bottom blob fc7 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer relu7, bottom blob fc7 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer fc7, bottom blob fc6 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:630]  [Backward] Layer fc7, param blob 0 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:630]  [Backward] Layer fc7, param blob 1 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer drop6, bottom blob fc6 diff: 0 
I1018 14:24:40.919765 5500 net.cpp:619]  [Backward] Layer relu6, bottom blob fc6 diff: 0 
I1018 14:24:40.936337 5500 net.cpp:619]  [Backward] Layer fc6, bottom blob pool2 diff: 0 
I1018 14:24:40.936337 5500 net.cpp:630]  [Backward] Layer fc6, param blob 0 diff: 0 
I1018 14:24:40.936337 5500 net.cpp:630]  [Backward] Layer fc6, param blob 1 diff: 0 
I1018 14:24:40.936337 5500 net.cpp:619]  [Backward] Layer pool2, bottom blob norm2 diff: 0 
I1018 14:24:40.951910 5500 net.cpp:619]  [Backward] Layer norm2, bottom blob conv2 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:619]  [Backward] Layer relu2, bottom blob conv2 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:619]  [Backward] Layer conv2, bottom blob pool1 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:630]  [Backward] Layer conv2, param blob 0 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:630]  [Backward] Layer conv2, param blob 1 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:619]  [Backward] Layer pool1, bottom blob norm1 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:619]  [Backward] Layer norm1, bottom blob conv1 diff: 0 
I1018 14:24:40.967483 5500 net.cpp:619]  [Backward] Layer relu1, bottom blob conv1 diff: 0 

You have a strong 'weight_decay'. Have a look at [common causes for nan during training](https://stackoverflow.com/q/33962226/1714410) – Shai


@Shai I reduced the weight decay to 0.0000000005 and still got gradients of 0. To be honest, I don't see how [common causes for nan during training](https://stackoverflow.com/q/33962226/1714410) fits my case. But I did notice that the gradients start out very low: 'Layer fc8, bottom blob fc7 diff: 8.12502e-05'. I saw [here](https://stackoverflow.com/questions/40510706/how-to-interpret-caffe-log-with-debug-info) that you mentioned generating random weights with a higher variance. How do you do that? – SimpleNotGood


'8e-5' is not that low for a gradient. You can change the initial weights by adjusting the 'weight_filler' of the different layers. – Shai
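
For illustration, the kind of filler change Shai describes is made per layer in train_val.prototxt; a sketch with a larger standard deviation (the value 0.1 is only an example, and type: "xavier" is another common choice):

weight_filler { 
    type: "gaussian" 
    std: 0.1 
} 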

Answer


I am not sure why your net is not learning, but here are a few points you might want to consider:

  1. Your test phase: a test batch_size of 50 with test_iter of 200 means you validate on 50*200 = 10,000 examples. Since you only have 1,800 examples, what is the meaning of such a large test_iter value?
    See this thread for more information on this issue.
  2. It looks like you are using the images "as is", meaning your input values are in the range [0..255]. It is common to subtract the image mean from the net's input, so the net sees zero-mean inputs.
  3. Take a look at your training debug info: are your gradients vanishing? Do you have layers that are no longer "active" (e.g., a "ReLU" whose top is all negative values is effectively dead)?
  4. A constant loss value suggests your net predicts only one label regardless of the input; consider shuffling your dataset (a minimal sketch follows this list).
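
A minimal sketch of the shuffling in point 4, done on the file list before the LMDB is written (the name train_shuffled.txt is only illustrative):

import random 

# Shuffle the "path label" lines once, so circles and rectangles are 
# interleaved within every training batch. 
with open('train.txt') as f: 
    lines = f.readlines() 
random.shuffle(lines) 
with open('train_shuffled.txt', 'w') as f: 
    f.writelines(lines) 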

Thank you for your help. The debug info was a very good hint – SimpleNotGood