Skip to content

Commit b56db67

Browse files
committed
Fix test case for Clip layer gradient
Minor lint fixes
1 parent 7f4f5d2 commit b56db67

File tree

3 files changed

+33
-3
lines changed

3 files changed

+33
-3
lines changed

src/caffe/layers/clip_layer.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
#include <algorithm>
22
#include <vector>
3+
34
#include "caffe/layers/clip_layer.hpp"
45

56
namespace caffe {

src/caffe/layers/clip_layer.cu

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
#include <vector>
2+
23
#include "caffe/layers/clip_layer.hpp"
34
#include "caffe/util/math_functions.hpp"
45

src/caffe/test/test_neuron_layer.cpp

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -234,9 +234,37 @@ TYPED_TEST(NeuronLayerTest, TestClipGradient) {
234234
CHECK(google::protobuf::TextFormat::ParseFromString(
235235
"clip_param { min: -1, max: 2 }", &layer_param));
236236
ClipLayer<Dtype> layer(layer_param);
237-
GradientChecker<Dtype> checker(1e-2, 1e-3);
238-
checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
239-
this->blob_top_vec_);
237+
// Unfortunately, it might happen that an input value lands very close to
238+
// a point where the derivative of Clip is discontinuous. In this case the numeric
239+
// gradient is likely to differ significantly (i.e. by a value larger than
240+
// checker tolerance) from the computed gradient. To handle such cases, we
241+
// eliminate such values from the input blob before the gradient check.
242+
const Dtype epsilon = 1e-2;
243+
const Dtype min_range_start = layer_param.clip_param().min() - epsilon;
244+
const Dtype min_range_end = layer_param.clip_param().min() + epsilon;
245+
const Dtype max_range_start = layer_param.clip_param().max() - epsilon;
246+
const Dtype max_range_end = layer_param.clip_param().max() + epsilon;
247+
// The input blob is owned by the NeuronLayerTest object, so we begin with
248+
// creating a temporary blob and copying the input data there.
249+
Blob<Dtype> temp_bottom;
250+
temp_bottom.ReshapeLike(*this->blob_bottom_);
251+
const Dtype* bottom_data = this->blob_bottom_->cpu_data();
252+
Dtype* temp_data_mutable = temp_bottom.mutable_cpu_data();
253+
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
254+
if (bottom_data[i] >= min_range_start &&
255+
bottom_data[i] <= min_range_end) {
256+
temp_data_mutable[i] = bottom_data[i] - epsilon;
257+
} else if (bottom_data[i] >= max_range_start &&
258+
bottom_data[i] <= max_range_end) {
259+
temp_data_mutable[i] = bottom_data[i] + epsilon;
260+
} else {
261+
temp_data_mutable[i] = bottom_data[i];
262+
}
263+
}
264+
vector<Blob<Dtype>*> temp_bottom_vec;
265+
temp_bottom_vec.push_back(&temp_bottom);
266+
GradientChecker<Dtype> checker(epsilon, 1e-3);
267+
checker.CheckGradientEltwise(&layer, temp_bottom_vec, this->blob_top_vec_);
240268
}
241269

242270
TYPED_TEST(NeuronLayerTest, TestReLU) {

0 commit comments

Comments
 (0)