Commit 9a8236e

reset Layer::Forward method back to vanilla version. Indirected gpu math call through private layer function.

lopho committed Aug 20, 2016
1 parent 63235e5 commit 9a8236e
Showing 2 changed files with 42 additions and 38 deletions.
41 changes: 39 additions & 2 deletions include/caffe/layer.hpp
@@ -440,6 +440,8 @@ class DLL_EXPORT Layer {
void Lock();
/** Unlock forward_mutex_ if this layer is shared */
void Unlock();

/** Dot product of n elements of x and y on the GPU; wraps the GPU math call
    so that the inlined Forward below does not invoke it directly. */
void layer_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out);

DISABLE_COPY_AND_ASSIGN(Layer);
}; // class Layer
@@ -448,8 +450,43 @@ class DLL_EXPORT Layer {
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Lock during forward to ensure sequential forward
  Lock();
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      layer_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  Unlock();
  return loss;
}

template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
39 changes: 3 additions & 36 deletions src/caffe/layer.cpp
@@ -23,44 +23,11 @@ void Layer<Dtype>::Unlock() {
}

template <typename Dtype>
Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Lock during forward to ensure sequential forward
  Lock();
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  Unlock();
  return loss;
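// Private indirection introduced by this commit: layer.cpp keeps the direct
// caffe_gpu_dot call, while Forward (inlined in layer.hpp) calls this member.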
void Layer<Dtype>::layer_gpu_dot(const int n, const Dtype* x, const Dtype* y,
    Dtype* out) {
  caffe_gpu_dot(n, x, y, out);
}


INSTANTIATE_CLASS(Layer);

} // namespace caffe
