From b2796ce6efb62f58c2dc9725585e48c5959e8221 Mon Sep 17 00:00:00 2001
From: laurent <laurent.mazare@gmail.com>
Date: Sat, 9 Sep 2023 21:10:30 +0100
Subject: [PATCH] Reduce the number of threads.

---
 candle-core/src/cuda_backend.rs | 14 ++++++++++----
 candle-kernels/src/conv.cu      | 19 ++++++++++++-------
 2 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/candle-core/src/cuda_backend.rs b/candle-core/src/cuda_backend.rs
index 7cc8548936..62b38a6d37 100644
--- a/candle-core/src/cuda_backend.rs
+++ b/candle-core/src/cuda_backend.rs
@@ -1050,11 +1050,11 @@ impl<'a> Map2 for ConvTranspose2D<'a> {
         k_l: &Layout,
         dev: &CudaDevice,
     ) -> Result<CudaSlice<T>> {
-        // Kernel shape: (c_in_k, c_out, h_k, w_k)
+        // Kernel shape: (c_in_k, c_out / groups, h_k, w_k)
         // Input shape: (b_size, c_in, h_in, w_in)
         let p = &self.0;
         let (out_w, out_h) = (p.out_w(), p.out_h());
-        let dst_el = p.c_out * out_w * out_h * p.b_size;
+        let dst_el = p.c_out_per_group * p.groups * out_w * out_h * p.b_size;
         let inp = &inp.slice(inp_l.start_offset()..);
         let k = &k.slice(k_l.start_offset()..);
         let shape = inp_l.shape();
@@ -1063,7 +1063,13 @@ impl<'a> Map2 for ConvTranspose2D<'a> {
         // SAFETY: Set later by running the kernel.
         let out = unsafe { dev.alloc::<T>(dst_el) }.w()?;
-        let cfg = LaunchConfig::for_num_elems(dst_el as u32);
+        const NUM_THREADS: u32 = 512;
+        let num_blocks = (dst_el as u32 + NUM_THREADS - 1) / NUM_THREADS;
+        let mut cfg = LaunchConfig {
+            grid_dim: (num_blocks, 1, 1),
+            block_dim: (NUM_THREADS, 1, 1),
+            shared_mem_bytes: 0,
+        };
         let func = dev.get_or_load_func(&kernel_name::<T>("conv_transpose2d"), kernels::CONV)?;
         let ds = if dims.len() == 4 {
             [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat()
         } else {
@@ -1072,13 +1078,13 @@ impl<'a> Map2 for ConvTranspose2D<'a> {
         };
         let ds = dev.htod_copy(ds).w()?;
         let params = (
-            el,
             out_w,
             out_h,
             p.stride,
             p.padding,
             p.output_padding,
             p.dilation,
+            p.groups,
             &ds,
             inp,
             k,
diff --git a/candle-kernels/src/conv.cu b/candle-kernels/src/conv.cu
index ba2fa1adbf..8b713a2ffc 100644
--- a/candle-kernels/src/conv.cu
+++ b/candle-kernels/src/conv.cu
@@ -116,13 +116,13 @@ __device__ void conv2d(
 // Naive implementation of conv_transpose2d.
 template <typename T, typename A>
 __device__ void conv_transpose2d(
-    const size_t src_numel,
     const size_t w_out,
     const size_t h_out,
     const size_t stride,
     const size_t padding,
     const size_t out_padding,
     const size_t dilation,
+    const size_t groups,
     const size_t *info,
     const T *src,
     const T *kernel,
@@ -130,17 +130,18 @@ __device__ void conv_transpose2d(
 ) {
   const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
   // src: (b_size, c_in, h_in, w_in)
-  // k: (c_in, c_out, h_k, w_k)
+  // k: (c_in, c_out / groups, h_k, w_k)
   const size_t *src_dims = info;
   const size_t *src_s = info + 4;
   const size_t *k_dims = info + 8;
   const size_t *k_s = info + 12;
   const size_t h_k = k_dims[2];
   const size_t w_k = k_dims[3];
-  const size_t c_out = k_dims[1];
+  const size_t c_out_per_group = k_dims[1];
   const size_t c_in = src_dims[1];
   const size_t h_in = src_dims[2];
   const size_t w_in = src_dims[3];
+  const size_t c_out = c_out_per_group * groups;
   if (dst_i >= src_dims[0] * c_out * w_out * h_out) {
     return;
   }
@@ -148,6 +149,10 @@ __device__ void conv_transpose2d(
   // TODO
   const size_t b_idx = dst_i / (w_out * h_out * c_out);
   const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out;
+  const size_t c_idx_in_group = dst_c_idx % c_out_per_group;
+  const size_t c_in_per_group = c_in / groups;
+  const size_t group_idx = dst_c_idx / c_out_per_group;
+  // const size_t c_in_per_group = c_in;
   // NCHW layout.
   const size_t out_y = (dst_i / w_out) % h_out;
   const size_t out_x = dst_i % w_out;
@@ -169,9 +174,9 @@ __device__ void conv_transpose2d(
       }
       int inp_y = inp_y_stride / stride;
      if (inp_y >= h_in) continue;
-      for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
+      for (size_t src_c_idx = group_idx * c_in_per_group; src_c_idx < (group_idx + 1) * c_in_per_group; ++src_c_idx) {
        const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_y * src_s[2] + inp_x * src_s[3];
-        const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_y * k_s[2] + k_x * k_s[3];
+        const size_t k_idx = src_c_idx * k_s[0] + c_idx_in_group * k_s[1] + k_y * k_s[2] + k_x * k_s[3];
        d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
       }
     }
@@ -365,19 +370,19 @@ extern "C" __global__ void FN_NAME( \

 #define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \
 extern "C" __global__ void FN_NAME( \
-    const size_t src_numel, \
     const size_t w_out, \
     const size_t h_out, \
     const size_t stride, \
     const size_t padding, \
     const size_t out_padding, \
     const size_t dilation, \
+    const size_t groups, \
     const size_t *info, \
     const TYPENAME *src, \
     const TYPENAME *kernel, \
     TYPENAME *dst \
 ) { \
-    conv_transpose2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \
+    conv_transpose2d<TYPENAME, TYPEACC>(w_out, h_out, stride, padding, out_padding, dilation, groups, info, src, kernel, dst); \
 } \

 #define AVG_POOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \