#include <vector>

#include "caffe/layers/split_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SplitLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  count_ = bottom[0]->count();
  for (int i = 0; i < top.size(); ++i) {
    // Do not allow in-place computation in the SplitLayer. Instead, share data
    // by reference in the forward pass, and keep separate diff allocations in
    // the backward pass. (Technically, it should be possible to share the diff
    // blob of the first split output with the input, but this seems to cause
    // some strange effects in practice...)
    CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not "
        "allow in-place computation.";
    top[i]->ReshapeLike(*bottom[0]);
    CHECK_EQ(count_, top[i]->count());
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Share the single bottom blob's data with every top blob; no copy is made.
  for (int i = 0; i < top.size(); ++i) {
    top[i]->ShareData(*bottom[0]);
  }
}

template <typename Dtype>
void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  if (top.size() == 1) {
    caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
    return;
  }
  // Initialize the bottom diff with the sum of the first two top diffs.
  caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(),
            bottom[0]->mutable_cpu_diff());
  // Accumulate the remaining top blob diffs.
  for (int i = 2; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(SplitLayer);
#endif

INSTANTIATE_CLASS(SplitLayer);
REGISTER_LAYER_CLASS(Split);

}  // namespace caffe
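
Since Forward_cpu shares one bottom blob with every top blob, the chain rule makes the bottom gradient the elementwise sum of all top gradients, which is exactly what the caffe_add/caffe_axpy calls above compute. Below is a minimal standalone sketch of that accumulation, not part of the Caffe source; the names (count, top_diffs, bottom_diff) and the literal values are illustrative assumptions only.

// Standalone illustration of Split's backward accumulation (hypothetical).
#include <cstdio>
#include <vector>

int main() {
  const int count = 4;  // elements per blob
  // One diff vector per top blob, i.e. per consumer of the split output.
  std::vector<std::vector<float>> top_diffs = {
      {1.f, 0.f, 2.f, 1.f},
      {0.f, 1.f, 1.f, 0.f},
      {2.f, 2.f, 0.f, 1.f}};
  std::vector<float> bottom_diff(count, 0.f);
  for (const auto& top_diff : top_diffs) {
    for (int k = 0; k < count; ++k) {
      bottom_diff[k] += top_diff[k];  // mirrors caffe_add / caffe_axpy
    }
  }
  for (float v : bottom_diff) std::printf("%g ", v);  // prints: 3 3 3 2
  std::printf("\n");
  return 0;
}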