Training VGG on your own data with Caffe

I'm writing this because some people have run into various problems when training VGG, so here it is for reference.

Network structure

VGG16 is used as the example, trained on my own cell image data.

solver.prototxt:

net: "/media/dl/source/Experiment/cell/test/vgg/vgg16.prototxt"
test_iter: # (fill in) number of test batches per validation pass (validation-set size / TEST batch_size)
test_interval: # (fill in) run validation every this many training iterations
base_lr: 0.0001
lr_policy: "step"
gamma: 0.1
stepsize: # (fill in) multiply the learning rate by gamma after this many iterations
display: # (fill in) print the training loss every this many iterations
max_iter: # (fill in) total number of training iterations
momentum: 0.9
weight_decay: 0.0005
snapshot: # (fill in) write a snapshot every this many iterations
snapshot_prefix: "/media/dl/source/Experiment/cell/test/vgg/vgg"
solver_mode: GPU

vgg16.prototxt:

Note that the data layers here use the "ImageData" type, i.e. the images are read directly from disk without being converted to LMDB; I did it this way for convenience since I was working on a server. For better efficiency, use the LMDB format instead. A data-layer definition in LMDB form is also given after this prototxt as a supplement.
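For reference, the .txt list files read by the ImageData layer (trainnew.txt / valnew.txt below) contain one image path (absolute, or relative to an optional root_folder) and one integer label starting from 0 per line; the file names here are made-up examples:

/media/dl/source/Experiment/cell/data/trainnew2_resize/cell_0001.png 0
/media/dl/source/Experiment/cell/data/trainnew2_resize/cell_0002.png 3
/media/dl/source/Experiment/cell/data/trainnew2_resize/cell_0003.png 6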

Also, remember to change num_output of the last fully connected layer to your own number of classes, and rename that layer (I renamed it to "cellfc8"). Caffe only copies pretrained weights into layers whose names match, so renaming the layer makes it train from scratch during fine-tuning instead of loading its pretrained parameters.

 name: "VGG16"
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
include {
phase: TRAIN
}
# transform_param {
# mirror: true
# crop_size:
# mean_file: "data/ilsvrc12_shrt_256/imagenet_mean.binaryproto"
# }
image_data_param {
source: "/media/dl/source/Experiment/cell/data/trainnew2_resize/trainnew.txt"
batch_size: # (fill in) training batch size, limited by GPU memory
shuffle: true
#is_color: false
new_height: 224 # resize inputs to the 224x224 expected by the pretrained VGG16 fc layers
new_width: 224
}
}
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
include {
phase: TEST
}
# transform_param {
# mirror: false
# crop_size:
# mean_file: "data/ilsvrc12_shrt_256/imagenet_mean.binaryproto"
# }
image_data_param {
source: "/media/dl/source/Experiment/cell/data/val2_resize/valnew.txt"
batch_size: # (fill in) test batch size; test_iter * batch_size should cover the validation set
#is_color: false
new_height: 224 # resize inputs to the 224x224 expected by the pretrained VGG16 fc layers
new_width: 224
}
}
layer {
bottom: "data"
top: "conv1_1"
name: "conv1_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv1_1"
top: "conv1_1"
name: "relu1_1"
type: "ReLU"
}
layer {
bottom: "conv1_1"
top: "conv1_2"
name: "conv1_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv1_2"
top: "conv1_2"
name: "relu1_2"
type: "ReLU"
}
layer {
bottom: "conv1_2"
top: "pool1"
name: "pool1"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool1"
top: "conv2_1"
name: "conv2_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv2_1"
top: "conv2_1"
name: "relu2_1"
type: "ReLU"
}
layer {
bottom: "conv2_1"
top: "conv2_2"
name: "conv2_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv2_2"
top: "conv2_2"
name: "relu2_2"
type: "ReLU"
}
layer {
bottom: "conv2_2"
top: "pool2"
name: "pool2"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool2"
top: "conv3_1"
name: "conv3_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv3_1"
top: "conv3_1"
name: "relu3_1"
type: "ReLU"
}
layer {
bottom: "conv3_1"
top: "conv3_2"
name: "conv3_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv3_2"
top: "conv3_2"
name: "relu3_2"
type: "ReLU"
}
layer {
bottom: "conv3_2"
top: "conv3_3"
name: "conv3_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv3_3"
top: "conv3_3"
name: "relu3_3"
type: "ReLU"
}
layer {
bottom: "conv3_3"
top: "pool3"
name: "pool3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool3"
top: "conv4_1"
name: "conv4_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv4_1"
top: "conv4_1"
name: "relu4_1"
type: "ReLU"
}
layer {
bottom: "conv4_1"
top: "conv4_2"
name: "conv4_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv4_2"
top: "conv4_2"
name: "relu4_2"
type: "ReLU"
}
layer {
bottom: "conv4_2"
top: "conv4_3"
name: "conv4_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv4_3"
top: "conv4_3"
name: "relu4_3"
type: "ReLU"
}
layer {
bottom: "conv4_3"
top: "pool4"
name: "pool4"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool4"
top: "conv5_1"
name: "conv5_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv5_1"
top: "conv5_1"
name: "relu5_1"
type: "ReLU"
}
layer {
bottom: "conv5_1"
top: "conv5_2"
name: "conv5_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv5_2"
top: "conv5_2"
name: "relu5_2"
type: "ReLU"
}
layer {
bottom: "conv5_2"
top: "conv5_3"
name: "conv5_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
bottom: "conv5_3"
top: "conv5_3"
name: "relu5_3"
type: "ReLU"
}
layer {
bottom: "conv5_3"
top: "pool5"
name: "pool5"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool5"
top: "fc6"
name: "fc6"
type: "InnerProduct"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
bottom: "fc6"
top: "fc6"
name: "relu6"
type: "ReLU"
}
layer {
bottom: "fc6"
top: "fc6"
name: "drop6"
type: "Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom: "fc6"
top: "fc7"
name: "fc7"
type: "InnerProduct"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
bottom: "fc7"
top: "fc7"
name: "relu7"
type: "ReLU"
}
layer {
bottom: "fc7"
top: "fc7"
name: "drop7"
type: "Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom: "fc7"
top: "fc8"
name: "cellfc8"
type: "InnerProduct"
param {
lr_mult: 10 # a larger multiplier is commonly used so the re-initialized layer learns faster
decay_mult: 1
}
param {
lr_mult: 20
decay_mult: 0
}
inner_product_param {
num_output: 7 # change to your own number of classes
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "accuracy_at_1"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy_at_1"
accuracy_param {
top_k: 1
}
include {
phase: TEST
}
}
layer {
name: "accuracy_at_5"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy_at_5"
accuracy_param {
top_k: 5
}
include {
phase: TEST
}
}
layer {
bottom: "fc8"
bottom: "label"
top: "loss"
name: "loss"
type: "SoftmaxWithLoss"
}

If you use the LMDB format instead, replace the data layers above with:

 name: "vgg"
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 224
# if the images are larger than 224, crop them to 224 with crop_size; if smaller, use new_height/new_width below instead
# new_height:
# new_width:
mean_file: "vggface/face_mean.binaryproto"
}
data_param {
source: "vggface/face_train_lmdb"
batch_size: # (fill in) training batch size, limited by GPU memory
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 224
# if the images are larger than 224, crop them to 224 with crop_size; if smaller, use new_height/new_width below instead
# new_height:
# new_width:
mean_file: "vggface/face_mean.binaryproto"
}
data_param {
source: "vggface/face_val_lmdb"
batch_size: # (fill in) test batch size; test_iter * batch_size should cover the validation set
backend: LMDB
}
}
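The LMDBs and the mean file referenced above are typically built with the convert_imageset and compute_image_mean tools that ship with Caffe. A minimal sketch, assuming the same list-file layout as before; the root folder, list file, output paths and resize dimensions are placeholders to adapt:

#!/usr/bin/env sh
# Sketch only: adjust root folder, list file, output paths and resize dimensions to your own data.
TOOLS=/home/dl/caffe-jonlong/build/tools
DATA=/media/dl/source/Experiment/cell/data

# convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
# (image paths in the list file are taken relative to ROOTFOLDER)
$TOOLS/convert_imageset --shuffle \
  --resize_height=256 --resize_width=256 \
  $DATA/trainnew2_resize/ $DATA/trainnew2_resize/trainnew.txt \
  vggface/face_train_lmdb

# compute the mean image used by mean_file in transform_param
$TOOLS/compute_image_mean vggface/face_train_lmdb vggface/face_mean.binaryproto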

Training

Here is a shell script for training:

#!/usr/bin/env sh

TOOLS=/home/dl/caffe-jonlong/build/tools

$TOOLS/caffe train \
-solver=/media/dl/source/Experiment/cell/test/vgg/solver.prototxt \
-weights=/media/dl/source/Experiment/cell/test/vgg/VGG_ILSVRC_16_layers.caffemodel \
-gpu=all
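If training is interrupted, it can be resumed from the .solverstate snapshot written under snapshot_prefix (the iteration number below is only an example):

$TOOLS/caffe train \
-solver=/media/dl/source/Experiment/cell/test/vgg/solver.prototxt \
-snapshot=/media/dl/source/Experiment/cell/test/vgg/vgg_iter_10000.solverstate \
-gpu=all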

The pretrained model VGG_ILSVRC_16_layers.caffemodel can be downloaded from:

 http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel 
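For example, it can be fetched directly on the server with wget:

wget http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel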