flatten_layer.cpp
#include <vector>

#include "caffe/layers/flatten_layer.hpp"

namespace caffe {
template <typename Dtype>
void FlattenLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
    "allow in-place computation.";
  // Resolve the (possibly negative) axis indices from the layer's
  // FlattenParameter into canonical non-negative indices.
  const int start_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().axis());
  const int end_axis = bottom[0]->CanonicalAxisIndex(
      this->layer_param_.flatten_param().end_axis());
  vector<int> top_shape;
  // Copy the axes before start_axis unchanged.
  for (int i = 0; i < start_axis; ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  // Collapse the axes in [start_axis, end_axis] into a single dimension.
  const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1);
  top_shape.push_back(flattened_dim);
  // Copy the axes after end_axis unchanged.
  for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) {
    top_shape.push_back(bottom[0]->shape(i));
  }
  top[0]->Reshape(top_shape);
  // Flattening must not change the total number of elements.
  CHECK_EQ(top[0]->count(), bottom[0]->count());
}
template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Flattening only changes the shape, so the top blob can share the
  // bottom blob's data rather than copying it.
  top[0]->ShareData(*bottom[0]);
}
template <typename Dtype>
void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Likewise, the gradient passes through unchanged: the bottom blob
  // shares the top blob's diff.
  bottom[0]->ShareDiff(*top[0]);
}
INSTANTIATE_CLASS(FlattenLayer);
REGISTER_LAYER_CLASS(Flatten);

}  // namespace caffe
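
A minimal usage sketch (not part of the file above) showing what Reshape and Forward_cpu do for a concrete blob. It assumes a standard Caffe build and the default FlattenParameter (axis = 1, end_axis = -1); the blob dimensions are illustrative.

#include <iostream>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layers/flatten_layer.hpp"

int main() {
  using caffe::Blob;
  using caffe::FlattenLayer;
  using caffe::LayerParameter;

  // A 2 x 3 x 4 x 5 bottom blob; with the default flatten_param
  // (axis = 1, end_axis = -1) the top should come out as 2 x 60.
  Blob<float> bottom_blob(2, 3, 4, 5);
  Blob<float> top_blob;
  std::vector<Blob<float>*> bottom(1, &bottom_blob);
  std::vector<Blob<float>*> top(1, &top_blob);

  LayerParameter param;           // defaults: axis = 1, end_axis = -1
  FlattenLayer<float> layer(param);
  layer.SetUp(bottom, top);       // runs Reshape(); top_blob is now 2 x 60
  layer.Forward(bottom, top);     // top shares bottom's data, no copy is made

  std::cout << top_blob.shape_string() << std::endl;  // prints "2 60 (120)"
  return 0;
}

Because Forward_cpu and Backward_cpu only call ShareData and ShareDiff, the layer adds no runtime cost beyond the shape bookkeeping in Reshape.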