forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathSpatialMaxUnpooling.cu
32 lines (27 loc) · 1.29 KB
/
SpatialMaxUnpooling.cu
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
#include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/common.h>
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) {
  // Max-unpooling forward pass: scatter each input element to the output
  // position recorded in `bottom_mask`. One logical thread per input pixel;
  // `nthreads` must equal num * channels * iheight * iwidth. `top_data` is
  // assumed pre-zeroed by the caller (positions not named by the mask are
  // left untouched here).
  CUDA_KERNEL_LOOP(index, nthreads) { // index enumerates the input pixels
    int c = (index / iwidth / iheight) % channels;   // channel of this pixel
    int n = index / iwidth / iheight / channels;     // batch item of this pixel
    // Base offset of the (n, c) output plane, computed as a local value.
    // The original advanced `top_data` itself, but CUDA_KERNEL_LOOP is a
    // grid-stride loop: mutating the parameter compounds the offset across
    // iterations whenever a thread processes more than one element, writing
    // out of bounds. 64-bit arithmetic also avoids int overflow on large
    // tensors.
    int64_t offset = ((int64_t)n * channels + c) * oheight * owidth;
    // Mask holds the flat index within the (oheight, owidth) plane,
    // shifted by TH_INDEX_BASE (Lua-era 1-based indexing support).
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    top_data[offset + maxind] = bottom_data[index];
  }
}
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) {
  // Max-unpooling backward pass: each input pixel gathers its gradient from
  // the output position it was scattered to in the forward pass (recorded in
  // `bottom_mask`). One logical thread per input pixel; `nthreads` must equal
  // num * channels * iheight * iwidth.
  CUDA_KERNEL_LOOP(index, nthreads) {
    int c = (index / iwidth / iheight) % channels;   // channel of this pixel
    int n = index / iwidth / iheight / channels;     // batch item of this pixel
    // Base offset of the (n, c) gradient plane, computed as a local value.
    // The original advanced `top_diff` itself, but CUDA_KERNEL_LOOP is a
    // grid-stride loop: mutating the parameter compounds the offset across
    // iterations whenever a thread processes more than one element, reading
    // out of bounds. 64-bit arithmetic also avoids int overflow on large
    // tensors.
    int64_t offset = ((int64_t)n * channels + c) * oheight * owidth;
    // Mask holds the flat index within the (oheight, owidth) plane,
    // shifted by TH_INDEX_BASE (Lua-era 1-based indexing support).
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    bottom_diff[index] = top_diff[offset + maxind];
  }
}
#include <THCUNN/generic/SpatialMaxUnpooling.cu>
#include <THC/THCGenerateFloatTypes.h>