Reading Caffe's softmax loss source code

(1) softmax loss

<1> The softmax loss function has the form:

f(z_i) = \frac{e^{z_i}}{\sum_j e^{z_j}}, \qquad L = -\log f(z_k)    (1)

Here z_i is the input to the softmax, f(z_i) is the softmax output, and k is the ground-truth label of the sample.
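To make (1) concrete before diving into the Caffe code, here is a minimal standalone C++ sketch (not Caffe code; the function name and the test vector are made up for illustration) that evaluates the softmax probabilities and the loss for a single sample:

 #include <algorithm>
 #include <cmath>
 #include <cstdio>
 #include <vector>

 // Evaluate equation (1) for one sample: z holds the raw scores (the softmax
 // inputs), k is the ground-truth label.
 float SoftmaxLoss(const std::vector<float>& z, int k) {
   // Subtract the max for numerical stability; this does not change f(z_i).
   float max_z = *std::max_element(z.begin(), z.end());
   float sum = 0.f;
   for (float zi : z) sum += std::exp(zi - max_z);
   float prob_k = std::exp(z[k] - max_z) / sum;  // f(z_k)
   return -std::log(prob_k);                     // L = -log f(z_k)
 }

 int main() {
   std::vector<float> z = {1.f, 2.f, 3.f};
   std::printf("loss = %f\n", SoftmaxLoss(z, 2));  // label k = 2, loss ~ 0.4076
   return 0;
 }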

<2> Differentiating the softmax loss with respect to its input z_j:

\frac{\partial L}{\partial z_j} = f(z_j) - 1 \quad (j = k), \qquad \frac{\partial L}{\partial z_j} = f(z_j) \quad (j \neq k)    (2)

If j == k, the variable z_k appears in both the numerator and the denominator of f(z_k); otherwise z_j appears only in the denominator.

The derivative of a sum equals the sum of the derivatives, so differentiating the sum with respect to one of its elements gives: \frac{\partial}{\partial z_j} \sum_i e^{z_i} = e^{z_j}.
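Spelling out the two cases (writing the loss as L = -z_k + \log \sum_i e^{z_i} and applying the rule above) reproduces equation (2):

\frac{\partial L}{\partial z_k} = -1 + \frac{e^{z_k}}{\sum_i e^{z_i}} = f(z_k) - 1 \qquad (j = k)

\frac{\partial L}{\partial z_j} = \frac{e^{z_j}}{\sum_i e^{z_i}} = f(z_j) \qquad (j \neq k)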

(2) The Forward_cpu() function in softmax_loss_layer.cpp:

 template <typename Dtype>
 void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   // The forward pass computes the softmax prob values.
   // Run the internal softmax layer's Forward() and store its output in prob_.
   softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
   const Dtype* prob_data = prob_.cpu_data();
   // A loss layer generally has two input blobs: the network's prediction blob
   // (bottom[0]) and the label blob (bottom[1]).
   const Dtype* label = bottom[1]->cpu_data();
   // dim = (N*C*H*W) / N = C*H*W
   int dim = prob_.count() / outer_num_;
   // count is the number of valid samples that contribute to the loss.
   int count = 0;
   Dtype loss = 0;
   for (int i = 0; i < outer_num_; ++i) {
     for (int j = 0; j < inner_num_; j++) {
       // Read the label.
       const int label_value = static_cast<int>(label[i * inner_num_ + j]);
       // If this sample's label equals the ignore_label_ parameter configured
       // on the SoftmaxWithLoss layer, the sample is skipped in both the
       // forward and the backward pass.
       if (has_ignore_label_ && label_value == ignore_label_) {
         continue;
       }
       // Check that label_value >= 0.
       DCHECK_GE(label_value, 0);
       // Check that label_value < prob_.shape(softmax_axis_) = C.
       DCHECK_LT(label_value, prob_.shape(softmax_axis_));
       // Take the log of the probability in the channel indexed by label_value
       // of the softmax output; this corresponds to equation (1).
       loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                            Dtype(FLT_MIN)));
       // One more valid sample.
       ++count;
     }
   }
   // The loss reported in the training log is the accumulated loss divided by
   // the normalizer (here the number of valid samples).
   top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
   if (top.size() == 2) {
     top[1]->ShareData(prob_);
   }
 }
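The index expression i * dim + label_value * inner_num_ + j relies on prob_ being stored in N x C x H x W order, with dim = C*H*W and inner_num_ = H*W, so it picks out the probability of the ground-truth class at spatial position j of sample i. Below is a minimal sketch of the same arithmetic (the shape values are made up for illustration):

 #include <cstdio>

 int main() {
   // Hypothetical shape for illustration: N = 2 samples, C = 3 classes, H = W = 2.
   const int N = 2, C = 3, H = 2, W = 2;
   const int inner_num = H * W;    // inner_num_ in the Caffe code
   const int dim = C * inner_num;  // dim = C*H*W

   // Flat index of the probability of class `label` at spatial position j of
   // sample i, matching prob_data[i * dim + label_value * inner_num_ + j] above.
   int i = 1, j = 3, label = 2;
   int idx = i * dim + label * inner_num + j;
   std::printf("flat index = %d of %d values\n", idx, N * dim);
   return 0;
 }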

(3) The Backward_cpu() function in softmax_loss_layer.cpp:

 template <typename Dtype>
 void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
   if (propagate_down[1]) {
     LOG(FATAL) << this->type()
                << " Layer cannot backpropagate to label inputs.";
   }
   if (propagate_down[0]) {
     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
     const Dtype* prob_data = prob_.cpu_data();
     // Copy the softmax output prob_ into the diff (gradient) blob of bottom[0].
     caffe_copy(prob_.count(), prob_data, bottom_diff);
     const Dtype* label = bottom[1]->cpu_data();
     int dim = prob_.count() / outer_num_;
     int count = 0;
     for (int i = 0; i < outer_num_; ++i) {
       for (int j = 0; j < inner_num_; ++j) {
         const int label_value = static_cast<int>(label[i * inner_num_ + j]);
         if (has_ignore_label_ && label_value == ignore_label_) {
           // Ignored sample: zero out the diff of every channel at this position.
           for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
             bottom_diff[i * dim + c * inner_num_ + j] = 0;
           }
         } else {
           // Corresponds to equation (2): during backpropagation, subtract 1
           // from the diff at the label index and leave the other channels as f(z_j).
           bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
           ++count;
         }
       }
     }
     // Scale gradient
     // top[0]->cpu_diff()[0] holds the loss_weight assigned to this loss layer
     // (1 by default); dividing by the normalizer averages over the valid samples.
     Dtype loss_weight = top[0]->cpu_diff()[0] /
                         get_normalizer(normalization_, count);
     caffe_scal(prob_.count(), loss_weight, bottom_diff);
   }
 }
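Putting the two functions together, every valid element of bottom[0]'s diff ends up as loss_weight * (f(z_j) - [j == label]) / normalizer. Here is a standalone sketch (not Caffe code; the shapes and values are made up) of the same copy, subtract-one, and scale sequence for a single sample:

 #include <cstdio>
 #include <vector>

 int main() {
   // Assumed softmax output for one sample with 3 classes; ground-truth label = 2.
   std::vector<float> prob = {0.1f, 0.2f, 0.7f};
   int label = 2;
   float loss_weight = 1.0f;  // what top[0]->cpu_diff()[0] would hold
   int count = 1;             // number of valid samples (the normalizer here)

   // Same steps as Backward_cpu: copy prob into the diff, subtract 1 at the
   // label index (equation (2)), then scale by loss_weight / normalizer.
   std::vector<float> diff = prob;
   diff[label] -= 1.0f;
   for (float& d : diff) d *= loss_weight / count;

   for (float d : diff) std::printf("%f ", d);  // prints 0.1 0.2 -0.3
   std::printf("\n");
   return 0;
 }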