// AbsCriterion.cu — forked from torch/cunn
#include "utils.h"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
// Binary functor returning the absolute difference |x - y|.
// Used as the "multiply" step of thrust::inner_product below, so that
// the reduction computes an L1 distance between two sequences.
struct abs_functor
{
  abs_functor() {}

  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    const float diff = x - y;
    if (diff >= 0)
      return diff;
    return -diff;
  }
};
// Forward pass of AbsCriterion (L1 loss): sum_i |input_i - target_i|,
// optionally averaged over the number of elements.
//
// Lua stack: arg 1 = criterion table (reads boolean field "sizeAverage",
// writes number field "output"); arg 2 = input CudaTensor;
// arg 3 = target CudaTensor (assumed same element count as input —
// TODO(review): confirm callers guarantee this).
// Returns 1 value: the loss as a Lua number.
static int cunn_AbsCriterion_updateOutput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *target = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
  int sizeAverage = luaT_getfieldcheckboolean(L, 1, "sizeAverage");
  THAssert(THCudaTensor_checkGPU(state, 2, input, target));
  float sum;
  long size = THCudaTensor_nElement(state, input);

  // Thrust iterates over flat device memory, so force contiguous copies
  // (no-ops when the tensors are already contiguous).
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);

  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  // inner_product with plus<float> / abs_functor == sum of |input - target|.
  sum = thrust::inner_product(input_data, input_data+size, target_data,
                              (float) 0, thrust::plus<float>(), abs_functor());

  // Guard size > 0: the unguarded 'sum /= size' produced NaN (0/0) for an
  // empty input when sizeAverage is set; an empty input now yields loss 0.
  if (sizeAverage && size > 0)
    sum /= size;

  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);

  // Store the loss in self.output and also return it to the caller
  // (lua_setfield pops the first pushed copy).
  lua_pushnumber(L, sum);
  lua_setfield(L, 1, "output");
  lua_pushnumber(L, sum);
  return 1;
}
// Binary functor: the gradient of |x - y| with respect to x, scaled by a
// fixed normalization constant, i.e. +norm when x - y >= 0, else -norm.
struct abs_updateGradInput_functor
{
  const float norm;

  abs_updateGradInput_functor(float norm_) : norm(norm_) {}

  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    const float diff = x - y;
    if (diff >= 0)
      return norm;
    return -norm;
  }
};
// Backward pass of AbsCriterion: gradInput_i = norm * sign(input_i - target_i),
// where norm = 1/size when sizeAverage is set, else 1.
//
// Lua stack: arg 1 = criterion table (reads "sizeAverage" and the
// "gradInput" CudaTensor, which is resized to match input);
// arg 2 = input CudaTensor; arg 3 = target CudaTensor.
// Returns 1 (result is delivered through self.gradInput).
static int cunn_AbsCriterion_updateGradInput(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *target = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
  int sizeAverage = luaT_getfieldcheckboolean(L, 1, "sizeAverage");
  THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
  THAssert(THCudaTensor_checkGPU(state, 3, input, target, gradInput));
  long size = THCudaTensor_nElement(state, input);

  // Float literal instead of the former '1./size' (double division then a
  // narrowing conversion); the size > 0 guard avoids computing an inf norm
  // for an empty input (the transform below would not use it, but keep the
  // value well-defined).
  float norm = (sizeAverage && size > 0) ? 1.0f / (float)size : 1.0f;

  // Thrust iterates over flat device memory, so force contiguous copies.
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);
  THCudaTensor_resizeAs(state, gradInput, input);

  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
  thrust::transform(input_data, input_data+size, target_data, gradInput_data,
                    abs_updateGradInput_functor(norm));

  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
  return 1;
}
// Method table mapping Lua-visible names to the C implementations above;
// registered under torch.CudaTensor's "nn" sub-table by the init function.
// The {NULL, NULL} sentinel terminates the list, as required by luaL_Reg.
static const struct luaL_Reg cunn_AbsCriterion__ [] = {
{"AbsCriterion_updateOutput", cunn_AbsCriterion_updateOutput},
{"AbsCriterion_updateGradInput", cunn_AbsCriterion_updateGradInput},
{NULL, NULL}
};
// Registers the AbsCriterion methods on the torch.CudaTensor metatable
// (under its "nn" name), then restores the Lua stack to its prior depth.
static void cunn_AbsCriterion_init(lua_State *L)
{
// Push the metatable, attach the method table at name "nn", pop the metatable.
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_AbsCriterion__, "nn");
lua_pop(L,1);
}