Commit 6cf53c10 authored by: Z Zhi

remove unused tests and add missing layers.

Parent 22215b54
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/neuron_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void LogLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  const Dtype base = this->layer_param_.log_param().base();
  if (base != Dtype(-1)) {
    CHECK_GT(base, 0) << "base must be strictly positive.";
  }
  // If base == -1, interpret the base as e and set log_base = 1 exactly.
  // Otherwise, calculate its log explicitly.
  const Dtype log_base = (base == Dtype(-1)) ? Dtype(1) : log(base);
  CHECK(!isnan(log_base))
      << "NaN result: log(base) = log(" << base << ") = " << log_base;
  CHECK(!isinf(log_base))
      << "Inf result: log(base) = log(" << base << ") = " << log_base;
  base_scale_ = Dtype(1) / log_base;
  CHECK(!isnan(base_scale_))
      << "NaN result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  CHECK(!isinf(base_scale_))
      << "Inf result: 1/log(base) = 1/log(" << base << ") = " << base_scale_;
  input_scale_ = this->layer_param_.log_param().scale();
  input_shift_ = this->layer_param_.log_param().shift();
  backward_num_scale_ = input_scale_ / log_base;
}
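// The forward pass below computes (a reading of this code, stated here for
// reference, not an authoritative spec):
//   y = log_base(scale * x + shift)
//     = ln(scale * x + shift) / ln(base)
//     = ln(scale * x + shift) * base_scale_
// with base_scale_ = 1 / ln(base), which is exactly 1 when base == -1
// (i.e. the natural log).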
template <typename Dtype>
void LogLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
    caffe_log(count, bottom_data, top_data);
  } else {
    caffe_copy(count, bottom_data, top_data);
    if (input_scale_ != Dtype(1)) {
      caffe_scal(count, input_scale_, top_data);
    }
    if (input_shift_ != Dtype(0)) {
      caffe_add_scalar(count, input_shift_, top_data);
    }
    caffe_log(count, top_data, top_data);
  }
  if (base_scale_ != Dtype(1)) {
    caffe_scal(count, base_scale_, top_data);
  }
}
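// Gradient of the forward map (again derived from the code above):
//   dy/dx = scale / (ln(base) * (scale * x + shift))
//         = backward_num_scale_ / (scale * x + shift)
// so the backward pass rebuilds (scale * x + shift) in bottom_diff, inverts
// it elementwise, scales by backward_num_scale_, and finally multiplies by
// the top diff (chain rule).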
template <typename Dtype>
void LogLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  caffe_copy(count, bottom_data, bottom_diff);
  if (input_scale_ != Dtype(1)) {
    caffe_scal(count, input_scale_, bottom_diff);
  }
  if (input_shift_ != Dtype(0)) {
    caffe_add_scalar(count, input_shift_, bottom_diff);
  }
  caffe_powx(count, bottom_diff, Dtype(-1), bottom_diff);
  if (backward_num_scale_ != Dtype(1)) {
    caffe_scal(count, backward_num_scale_, bottom_diff);
  }
  caffe_mul(count, top_diff, bottom_diff, bottom_diff);
}
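// Boilerplate notes (upstream Caffe behavior; stated here for readers of the
// diff): in CPU_ONLY builds, STUB_GPU generates Forward_gpu/Backward_gpu
// stubs that abort with a "no GPU" fatal log; INSTANTIATE_CLASS emits the
// float and double instantiations; REGISTER_LAYER_CLASS makes the layer
// constructible from prototxt as type "Log".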
#ifdef CPU_ONLY
STUB_GPU(LogLayer);
#endif
INSTANTIATE_CLASS(LogLayer);
REGISTER_LAYER_CLASS(Log);
} // namespace caffe
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/neuron_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void LogLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) {
    caffe_gpu_log(count, bottom_data, top_data);
  } else {
    caffe_copy(count, bottom_data, top_data);
    if (input_scale_ != Dtype(1)) {
      caffe_gpu_scal(count, input_scale_, top_data);
    }
    if (input_shift_ != Dtype(0)) {
      caffe_gpu_add_scalar(count, input_shift_, top_data);
    }
    caffe_gpu_log(count, top_data, top_data);
  }
  if (base_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, base_scale_, top_data);
  }
}
template <typename Dtype>
void LogLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  caffe_copy(count, bottom_data, bottom_diff);
  if (input_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, input_scale_, bottom_diff);
  }
  if (input_shift_ != Dtype(0)) {
    caffe_gpu_add_scalar(count, input_shift_, bottom_diff);
  }
  caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff);
  if (backward_num_scale_ != Dtype(1)) {
    caffe_gpu_scal(count, backward_num_scale_, bottom_diff);
  }
  caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(LogLayer);
} // namespace caffe
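For readers trying the new layer, here is a minimal usage sketch. It assumes a standard Caffe build in which this commit's LogParameter (fields base, scale, shift) has been wired into caffe.proto; the blob shape, filler range, and base value are arbitrary illustration choices, not values taken from this commit.

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/neuron_layers.hpp"

using namespace caffe;

int main() {
  Caffe::set_mode(Caffe::CPU);
  // Keep all inputs positive so log(scale * x + shift) is finite everywhere.
  Blob<float> bottom(2, 3, 4, 5);
  Blob<float> top;
  FillerParameter filler_param;
  filler_param.set_min(0.1f);
  filler_param.set_max(10.0f);
  UniformFiller<float> filler(filler_param);
  filler.Fill(&bottom);
  std::vector<Blob<float>*> bottom_vec(1, &bottom);
  std::vector<Blob<float>*> top_vec(1, &top);
  LayerParameter layer_param;
  layer_param.mutable_log_param()->set_base(2.0f);  // y = log2(x)
  LogLayer<float> layer(layer_param);
  layer.SetUp(bottom_vec, top_vec);
  layer.Forward(bottom_vec, top_vec);
  return 0;
}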
@@ -102,95 +102,4 @@ TYPED_TEST(LstmLayerTest, TestGradientBatchDefault) {
    LOG(ERROR) << "Skipping test due to old architecture.";
  }
}
TYPED_TEST(LstmLayerTest, TestGradientClipMask) {
  typedef typename TypeParam::Dtype Dtype;
  bool IS_VALID_CUDA = false;
#ifndef CPU_ONLY
  IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2;
#endif
  if (Caffe::mode() == Caffe::CPU ||
      sizeof(Dtype) == 4 || IS_VALID_CUDA) {
    LayerParameter layer_param;
    LSTMParameter* lstm_param =
        layer_param.mutable_lstm_param();
    lstm_param->set_num_output(4);
    lstm_param->mutable_weight_filler()->set_type("uniform");
    lstm_param->mutable_weight_filler()->set_min(-0.01);
    lstm_param->mutable_weight_filler()->set_max(0.01);
    lstm_param->mutable_bias_filler()->set_type("constant");
    lstm_param->mutable_bias_filler()->set_value(0);
    this->blob_bottom2_->mutable_cpu_data()[0] = 0;
    this->blob_bottom2_->mutable_cpu_data()[1] = 1;
    this->blob_bottom2_->mutable_cpu_data()[2] = 1;
    this->blob_bottom2_->mutable_cpu_data()[3] = 0;
    this->blob_bottom2_->mutable_cpu_data()[4] = 1;
    this->blob_bottom2_->mutable_cpu_data()[5] = 1;
    this->blob_bottom2_->mutable_cpu_data()[6] = 1;
    this->blob_bottom2_->mutable_cpu_data()[7] = 0;
    this->blob_bottom2_->mutable_cpu_data()[8] = 1;
    this->blob_bottom2_->mutable_cpu_data()[9] = 1;
    this->blob_bottom2_->mutable_cpu_data()[10] = 0;
    this->blob_bottom2_->mutable_cpu_data()[11] = 1;
    this->blob_bottom_vec_.clear();
    this->blob_bottom_vec_.push_back(this->blob_bottom_);
    this->blob_bottom_vec_.push_back(this->blob_bottom2_);
    LstmLayer<Dtype> layer(layer_param);
    GradientChecker<Dtype> checker(1e-2, 1e-3);
    checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
        this->blob_top_vec_, 0);
  } else {
    LOG(ERROR) << "Skipping test due to old architecture.";
  }
}
TYPED_TEST(LstmLayerTest, TestGradientBatchClipMask) {
  typedef typename TypeParam::Dtype Dtype;
  bool IS_VALID_CUDA = false;
#ifndef CPU_ONLY
  IS_VALID_CUDA = CAFFE_TEST_CUDA_PROP.major >= 2;
#endif
  if (Caffe::mode() == Caffe::CPU ||
      sizeof(Dtype) == 4 || IS_VALID_CUDA) {
    LayerParameter layer_param;
    LSTMParameter* lstm_param =
        layer_param.mutable_lstm_param();
    lstm_param->set_num_output(4);
    lstm_param->set_batch_size(3);
    lstm_param->mutable_weight_filler()->set_type("uniform");
    lstm_param->mutable_weight_filler()->set_min(-0.01);
    lstm_param->mutable_weight_filler()->set_max(0.01);
    lstm_param->mutable_bias_filler()->set_type("constant");
    lstm_param->mutable_bias_filler()->set_value(0);
    // t = 0
    this->blob_bottom2_->mutable_cpu_data()[0] = 0;
    this->blob_bottom2_->mutable_cpu_data()[1] = 0;
    this->blob_bottom2_->mutable_cpu_data()[2] = 0;
    // t = 1
    this->blob_bottom2_->mutable_cpu_data()[3] = 1;
    this->blob_bottom2_->mutable_cpu_data()[4] = 1;
    this->blob_bottom2_->mutable_cpu_data()[5] = 0;
    // t = 2
    this->blob_bottom2_->mutable_cpu_data()[6] = 1;
    this->blob_bottom2_->mutable_cpu_data()[7] = 0;
    this->blob_bottom2_->mutable_cpu_data()[8] = 1;
    // t = 3
    this->blob_bottom2_->mutable_cpu_data()[9] = 0;
    this->blob_bottom2_->mutable_cpu_data()[10] = 1;
    this->blob_bottom2_->mutable_cpu_data()[11] = 1;
    this->blob_bottom_vec_.clear();
    this->blob_bottom_vec_.push_back(this->blob_bottom_);
    this->blob_bottom_vec_.push_back(this->blob_bottom2_);
    LstmLayer<Dtype> layer(layer_param);
    GradientChecker<Dtype> checker(1e-2, 1e-3);
    checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
        this->blob_top_vec_, 0);
  } else {
    LOG(ERROR) << "Skipping test due to old architecture.";
  }
}
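A note on the 0/1 values written into blob_bottom2_ above (an inference from common recurrent-layer conventions; this diff does not define the semantics): the second bottom blob appears to act as a per-timestep continuation mask, where a 0 marks a sequence boundary that resets the recurrent state before the update at that step. Under that assumption, each step computes

\[
  \tilde{h}_{t-1} = m_t \, h_{t-1}, \qquad
  \tilde{c}_{t-1} = m_t \, c_{t-1}, \qquad
  (h_t, c_t) = \mathrm{LSTM}\!\left(x_t, \tilde{h}_{t-1}, \tilde{c}_{t-1}\right),
\]

so with batch_size = 3 the index layout is mask[t * 3 + n] for stream n at step t, and each stream starts fresh wherever its mask is 0.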
} // namespace caffe
#include <cstring>
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"
namespace caffe {
template <typename TypeParam>
class MoreSparseIm2colLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  MoreSparseIm2colLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
        blob_top_(new Blob<Dtype>()) {
    // fill the values
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
  virtual ~MoreSparseIm2colLayerTest() {
    delete blob_bottom_;
    delete blob_top_;
  }
  Blob<Dtype>* const blob_bottom_;
  Blob<Dtype>* const blob_top_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};
TYPED_TEST_CASE(MoreSparseIm2colLayerTest, TestDtypesAndDevices);
TYPED_TEST(MoreSparseIm2colLayerTest, TestSetup) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_size(7);
  convolution_param->set_pad(3);
  MoreSparseIm2colLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_->num(), 2);
  EXPECT_EQ(this->blob_top_->channels(), 5 * 3);
  EXPECT_EQ(this->blob_top_->height(), 6);
  EXPECT_EQ(this->blob_top_->width(), 5);
}
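The expected spatial shape follows the usual im2col arithmetic. With input H = 6, W = 5, kernel k = 7, pad p = 3, and (implicitly) stride s = 1:

\[
  H_{\text{out}} = \frac{H + 2p - k}{s} + 1 = \frac{6 + 6 - 7}{1} + 1 = 6,
  \qquad
  W_{\text{out}} = \frac{W + 2p - k}{s} + 1 = \frac{5 + 6 - 7}{1} + 1 = 5.
\]

A dense im2col over a 7x7 kernel would produce 49 * 3 = 147 output channels; the 5 * 3 expected here (and the 25 * 3 expected in the SparseIm2colLayer test further below) suggests these layers sample only 5 and 25 of the 49 kernel positions per input channel. The exact sampling pattern is an inference from the tests, not documented in this diff.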
TYPED_TEST(MoreSparseIm2colLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_size(7);
  convolution_param->set_pad(3);
  MoreSparseIm2colLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}

TYPED_TEST(MoreSparseIm2colLayerTest, TestRectGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_h(5);
  convolution_param->set_kernel_w(3);
  convolution_param->set_pad_h(2);
  convolution_param->set_pad_w(1);
  MoreSparseIm2colLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}
} // namespace caffe
#include <cstring>
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"
namespace caffe {
template <typename TypeParam>
class SparseIm2colLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  SparseIm2colLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
        blob_top_(new Blob<Dtype>()) {
    // fill the values
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
  virtual ~SparseIm2colLayerTest() {
    delete blob_bottom_;
    delete blob_top_;
  }
  Blob<Dtype>* const blob_bottom_;
  Blob<Dtype>* const blob_top_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};
TYPED_TEST_CASE(SparseIm2colLayerTest, TestDtypesAndDevices);
TYPED_TEST(SparseIm2colLayerTest, TestSetup) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_size(7);
  convolution_param->set_pad(3);
  SparseIm2colLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_->num(), 2);
  EXPECT_EQ(this->blob_top_->channels(), 25 * 3);
  EXPECT_EQ(this->blob_top_->height(), 6);
  EXPECT_EQ(this->blob_top_->width(), 5);
}
TYPED_TEST(SparseIm2colLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_size(7);
  convolution_param->set_pad(3);
  SparseIm2colLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}

TYPED_TEST(SparseIm2colLayerTest, TestRectGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_kernel_h(5);
  convolution_param->set_kernel_w(3);
  convolution_param->set_pad_h(2);
  convolution_param->set_pad_w(1);
  SparseIm2colLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}
} // namespace caffe