initial commit
This commit is contained in:
commit
e505acdb29
41 changed files with 2922 additions and 0 deletions
50
caffe-layers/include/caffe/layers/dspp_layer.hpp
Normal file
50
caffe-layers/include/caffe/layers/dspp_layer.hpp
Normal file
|
@ -0,0 +1,50 @@
|
|||
#ifndef CAFFE_DSPP_LAYER_HPP_
|
||||
#define CAFFE_DSPP_LAYER_HPP_
|
||||
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/common.hpp"
|
||||
#include "caffe/layers/data_layer.hpp"
|
||||
#include "caffe/layer.hpp"
|
||||
#include "caffe/layers/loss_layer.hpp"
|
||||
#include "caffe/layers/neuron_layer.hpp"
|
||||
#include "caffe/proto/caffe.pb.h"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
template <typename Dtype>
|
||||
class DSPPLayer : public Layer<Dtype> {
|
||||
public:
|
||||
explicit DSPPLayer(const LayerParameter& param)
|
||||
: Layer<Dtype>(param) {}
|
||||
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
virtual inline const char* type() const { return "DSPPLayer"; }
|
||||
virtual inline int ExactNumBottomBlobs() const { return 2; };
|
||||
virtual inline int MinTopBlobs() const { return 1; }
|
||||
|
||||
protected:
|
||||
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top);
|
||||
//virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
|
||||
// const vector<Blob<Dtype>*>& top);
|
||||
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
|
||||
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
|
||||
//virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
|
||||
// const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
|
||||
|
||||
int width_;
|
||||
int height_;
|
||||
int channel_;
|
||||
int num_;
|
||||
|
||||
};
|
||||
|
||||
} // namespace caffe
|
||||
|
||||
#endif // CAFFE_DSPP_LAYER_HPP_
|
56
caffe-layers/include/caffe/layers/pose_data_layer.hpp
Normal file
56
caffe-layers/include/caffe/layers/pose_data_layer.hpp
Normal file
|
@ -0,0 +1,56 @@
|
|||
#ifndef CAFFE_POSE_DATA_LAYER_HPP_
|
||||
#define CAFFE_POSE_DATA_LAYER_HPP_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/blob.hpp"
|
||||
#include "caffe/layer.hpp"
|
||||
#include "caffe/proto/caffe.pb.h"
|
||||
|
||||
#include "caffe/layers/base_data_layer.hpp"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
/**
 * @brief In-memory data layer for pose training data, modeled on Caffe's
 *        MemoryDataLayer: data is pushed in from application code via
 *        AddDatumVector / AddMatVector / Reset and served batch by batch.
 *
 * Produces two tops: top[0] the image batch (N, C, H, W) and top[1] the
 * label blob, shaped (N, 4) by the .cpp (presumably 4 pose values per
 * sample — confirm with callers).
 */
template <typename Dtype>
class PoseDataLayer : public BaseDataLayer<Dtype> {
 public:
  explicit PoseDataLayer(const LayerParameter& param)
      : BaseDataLayer<Dtype>(param), has_new_data_(false) {}
  // Reads batch_size/channels/height/width from memory_data_param and
  // shapes both tops and the staging blobs.
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "PoseData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }

  // Stage a set of Datums (transformed via data_transformer_); count must
  // be a positive multiple of batch_size_.
  virtual void AddDatumVector(const vector<Datum>& datum_vector);
  // Stage a set of cv::Mat images with per-image float labels.
  // NOTE(review): cv::Mat is used here but this header includes no OpenCV
  // header — presumably pulled in via base_data_layer.hpp; verify.
  virtual void AddMatVector(const vector<cv::Mat>& mat_vector,
      const vector<float>& labels);

  // Reset should accept const pointers, but can't, because the memory
  // will be given to Blob, which is mutable
  void Reset(Dtype* data, Dtype* label, int n);
  void set_batch_size(int new_size);

  int batch_size() { return batch_size_; }
  int channels() { return channels_; }
  int height() { return height_; }
  int width() { return width_; }

 protected:
  // Serves the next batch by pointing the tops into data_/labels_ and
  // advancing pos_.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  // size_ = channels_ * height_ * width_ (per-image element count).
  int batch_size_, channels_, height_, width_, size_;
  // Borrowed (non-owning) pointers to the caller-supplied batch memory;
  // set by Reset(), NULL until then.
  Dtype* data_;
  Dtype* labels_;
  // Total number of staged images; pos_ is the cursor into them.
  int n_;
  size_t pos_;
  // Staging blobs that own transformed data between Add*Vector and Reset.
  Blob<Dtype> added_data_;
  Blob<Dtype> added_label_;
  // True while staged data has not yet been fully consumed by Forward_cpu.
  bool has_new_data_;
};
|
||||
|
||||
} // namespace caffe
|
||||
|
||||
#endif
|
92
caffe-layers/src/caffe/layers/dspp_layer.cpp
Normal file
92
caffe-layers/src/caffe/layers/dspp_layer.cpp
Normal file
|
@ -0,0 +1,92 @@
|
|||
#include <cmath>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/layer.hpp"
|
||||
#include "caffe/layers/dspp_layer.hpp"
|
||||
|
||||
#include <boost/spirit/include/phoenix_core.hpp>
|
||||
#include <boost/spirit/include/phoenix_operator.hpp>
|
||||
|
||||
|
||||
namespace caffe {
|
||||
// No one-time setup is required: all shape-dependent state (num_,
// channel_, height_, width_) is (re)computed in Reshape.
template <typename Dtype>
void DSPPLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
}
|
||||
|
||||
template <typename Dtype>
|
||||
void DSPPLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
|
||||
|
||||
num_ = bottom[1]->shape()[0];
|
||||
channel_ = bottom[1]->shape()[1]; // the input data size
|
||||
height_ = bottom[1]->shape()[2];
|
||||
width_ = bottom[1]->shape()[3];
|
||||
|
||||
// init output size
|
||||
vector<int> output_shape;
|
||||
output_shape.push_back(num_);
|
||||
output_shape.push_back(channel_);
|
||||
output_shape.push_back(height_);
|
||||
output_shape.push_back(width_);
|
||||
top[0]->Reshape(output_shape);
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void DSPPLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top) {
|
||||
Dtype* top_data = top[0]->mutable_cpu_data();
|
||||
|
||||
caffe_set<Dtype>(top[0]->count(), 0, top_data); // initilize to be 0
|
||||
|
||||
for (int n=0; n<num_; ++n) {
|
||||
for (int h = 0; h < height_; ++h) { // for the input data size
|
||||
for (int w = 0; w < width_; ++w) {
|
||||
for (int c = 0; c < channel_; ++c) {
|
||||
top_data[top[0]->offset(n, c, h, w)] = bottom[1]->data_at(n, c, h, w) * bottom[0]->data_at(n, 0, h, w);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
top_data = NULL;
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void DSPPLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
|
||||
const vector<bool>& propagate_down,
|
||||
const vector<Blob<Dtype>*>& bottom) {
|
||||
if (propagate_down[0]) {
|
||||
const Dtype* top_diff = top[0]->cpu_diff();
|
||||
Dtype* data_diff = bottom[1]->mutable_cpu_diff();
|
||||
Dtype* heat_map_diff = bottom[0]->mutable_cpu_diff();
|
||||
|
||||
caffe_set<Dtype>(bottom[1]->count(), 0, data_diff);
|
||||
caffe_set<Dtype>(bottom[0]->count(), 0, heat_map_diff);
|
||||
// Dtype activation_h, activation_w;
|
||||
|
||||
for (int n = 0; n < num_; ++n) {
|
||||
for (int h = 0; h < height_; ++h) {
|
||||
for (int w = 0; w < width_; ++w) {
|
||||
for (int c = 0; c < channel_; ++c) {
|
||||
|
||||
Dtype buffer = top_diff[top[0]->offset(n, c, h, w)];
|
||||
data_diff[bottom[1]->offset(n, c, h, w)] = buffer * (bottom[0]->data_at(n, 0, h, w));
|
||||
|
||||
buffer *= bottom[1]->data_at(n,c,h,w) / channel_;
|
||||
|
||||
heat_map_diff[bottom[0]->offset(n,0,h,w)] += buffer;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
top_diff = NULL;
|
||||
data_diff = NULL;
|
||||
heat_map_diff = NULL;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
INSTANTIATE_CLASS(DSPPLayer);
|
||||
REGISTER_LAYER_CLASS(DSPP);
|
||||
|
||||
} // namespace caffe
|
128
caffe-layers/src/caffe/layers/pose_data_layer.cpp
Normal file
128
caffe-layers/src/caffe/layers/pose_data_layer.cpp
Normal file
|
@ -0,0 +1,128 @@
|
|||
#include <opencv2/core/core.hpp>
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "caffe/layers/pose_data_layer.hpp"
|
||||
|
||||
namespace caffe {
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top) {
|
||||
batch_size_ = this->layer_param_.memory_data_param().batch_size();
|
||||
channels_ = this->layer_param_.memory_data_param().channels();
|
||||
height_ = this->layer_param_.memory_data_param().height();
|
||||
width_ = this->layer_param_.memory_data_param().width();
|
||||
size_ = channels_ * height_ * width_;
|
||||
CHECK_GT(batch_size_ * size_, 0) <<
|
||||
"batch_size, channels, height, and width must be specified and"
|
||||
" positive in memory_data_param";
|
||||
int label_shape_[] = {batch_size_, 4};
|
||||
vector<int> label_shape(label_shape_, label_shape_+2);
|
||||
top[0]->Reshape(batch_size_, channels_, height_, width_);
|
||||
top[1]->Reshape(label_shape);
|
||||
added_data_.Reshape(batch_size_, channels_, height_, width_);
|
||||
added_label_.Reshape(label_shape);
|
||||
data_ = NULL;
|
||||
labels_ = NULL;
|
||||
added_data_.cpu_data();
|
||||
added_label_.cpu_data();
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::AddDatumVector(const vector<Datum>& datum_vector) {
|
||||
CHECK(!has_new_data_) <<
|
||||
"Can't add data until current data has been consumed.";
|
||||
size_t num = datum_vector.size();
|
||||
CHECK_GT(num, 0) << "There is no datum to add.";
|
||||
CHECK_EQ(num % batch_size_, 0) <<
|
||||
"The added data must be a multiple of the batch size.";
|
||||
added_data_.Reshape(num, channels_, height_, width_);
|
||||
int label_shape_[] = {(int)num, 4};
|
||||
vector<int> label_shape(label_shape_, label_shape_+2);
|
||||
added_label_.Reshape(label_shape);
|
||||
// Apply data transformations (mirror, scale, crop...)
|
||||
this->data_transformer_->Transform(datum_vector, &added_data_);
|
||||
// Copy Labels
|
||||
Dtype* top_label = added_label_.mutable_cpu_data();
|
||||
for (int item_id = 0; item_id < num; ++item_id) {
|
||||
top_label[item_id] = datum_vector[item_id].label();
|
||||
}
|
||||
// num_images == batch_size_
|
||||
Dtype* top_data = added_data_.mutable_cpu_data();
|
||||
Reset(top_data, top_label, num);
|
||||
has_new_data_ = true;
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
|
||||
const vector<float>& labels) {
|
||||
size_t num = mat_vector.size();
|
||||
CHECK(!has_new_data_) <<
|
||||
"Can't add mat until current data has been consumed.";
|
||||
CHECK_GT(num, 0) << "There is no mat to add";
|
||||
CHECK_EQ(num % batch_size_, 0) <<
|
||||
"The added data must be a multiple of the batch size.";
|
||||
added_data_.Reshape(num, channels_, height_, width_);
|
||||
int label_shape_[] = {(int)num, 4};
|
||||
vector<int> label_shape(label_shape_, label_shape_+2);
|
||||
added_label_.Reshape(label_shape);
|
||||
// Apply data transformations (mirror, scale, crop...)
|
||||
this->data_transformer_->Transform(mat_vector, &added_data_);
|
||||
// Copy Labels
|
||||
Dtype* top_label = added_label_.mutable_cpu_data();
|
||||
for (int item_id = 0; item_id < num; ++item_id) {
|
||||
top_label[item_id] = labels[item_id];
|
||||
}
|
||||
// num_images == batch_size_
|
||||
Dtype* top_data = added_data_.mutable_cpu_data();
|
||||
Reset(top_data, top_label, num);
|
||||
has_new_data_ = true;
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::Reset(Dtype* data, Dtype* labels, int n) {
|
||||
CHECK(data);
|
||||
CHECK(labels);
|
||||
CHECK_EQ(n % batch_size_, 0) << "n must be a multiple of batch size";
|
||||
// Warn with transformation parameters since a memory array is meant to
|
||||
// be generic and no transformations are done with Reset().
|
||||
//if (this->layer_param_.has_transform_param()) {
|
||||
// LOG(WARNING) << this->type() << " does not transform array data on Reset()";
|
||||
//}
|
||||
data_ = data;
|
||||
labels_ = labels;
|
||||
n_ = n;
|
||||
pos_ = 0;
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::set_batch_size(int new_size) {
|
||||
CHECK(!has_new_data_) <<
|
||||
"Can't change batch_size until current data has been consumed.";
|
||||
batch_size_ = new_size;
|
||||
added_data_.Reshape(batch_size_, channels_, height_, width_);
|
||||
int label_shape_[] = {(int)batch_size_, 4};
|
||||
vector<int> label_shape(label_shape_, label_shape_+2);
|
||||
added_label_.Reshape(label_shape);
|
||||
}
|
||||
|
||||
template <typename Dtype>
|
||||
void PoseDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
|
||||
const vector<Blob<Dtype>*>& top) {
|
||||
CHECK(data_) << "PoseDataLayer needs to be initalized by calling Reset";
|
||||
top[0]->Reshape(batch_size_, channels_, height_, width_);
|
||||
int label_shape_[] = {(int)batch_size_, 4};
|
||||
vector<int> label_shape(label_shape_, label_shape_+2);
|
||||
added_label_.Reshape(label_shape);
|
||||
top[0]->set_cpu_data(data_ + pos_ * size_);
|
||||
top[1]->set_cpu_data(labels_ + pos_);
|
||||
pos_ = (pos_ + batch_size_) % n_;
|
||||
if (pos_ == 0)
|
||||
has_new_data_ = false;
|
||||
}
|
||||
|
||||
INSTANTIATE_CLASS(PoseDataLayer);
|
||||
REGISTER_LAYER_CLASS(PoseData);
|
||||
|
||||
} // namespace caffe
|
Loading…
Add table
Add a link
Reference in a new issue