diff --git a/tests/ttnn/unit_tests/operations/test_new_conv2d.py b/tests/ttnn/unit_tests/operations/test_new_conv2d.py
index aca2169ac22..6f8c395b421 100644
--- a/tests/ttnn/unit_tests/operations/test_new_conv2d.py
+++ b/tests/ttnn/unit_tests/operations/test_new_conv2d.py
@@ -322,7 +322,11 @@ def run_conv_with_split(
 @pytest.mark.parametrize("stride", [1])
 @pytest.mark.parametrize(
     "output_channels, input_channels, input_height, input_width, filter_height, filter_width, pad_h, pad_w, act_block_w_div",
-    ((64, 32, 130, 130, 3, 3, 0, 0, 1),),
+    (
+        (64, 32, 130, 130, 3, 3, 0, 0, 1),
+        (64, 32, 128, 128, 3, 3, 1, 1, 1),
+        (64, 32, 1024, 1024, 3, 3, 1, 1, 1),
+    ),
 )
 @pytest.mark.parametrize(
     "has_bias",
@@ -424,6 +428,7 @@ def test_conv_dram(
         reshard_if_not_optimal=True,
         act_block_w_div=act_block_w_div,
         output_height_in_l1=64,
+        act_block_h_override=64,
     )
     [tt_output_tensor_on_device, out_height, out_width, weights_device, bias_device] = ttnn.conv2d(
         input_tensor=tt_input_tensor,
diff --git a/ttnn/cpp/ttnn/operations/conv/conv2d/conv2d.cpp b/ttnn/cpp/ttnn/operations/conv/conv2d/conv2d.cpp
index e437354e1ed..9bffd08f23a 100644
--- a/ttnn/cpp/ttnn/operations/conv/conv2d/conv2d.cpp
+++ b/ttnn/cpp/ttnn/operations/conv/conv2d/conv2d.cpp
@@ -725,13 +725,13 @@ std::tuple<ttnn::Tensor, uint32_t, uint32_t, ttnn::Tensor, std::optional<ttnn::
             std::array<uint32_t, 4>{1, 1, 1, 1} //Step
         );
         log_debug(tt::LogOp, "Sliced input tensor shape: {}", sliced_input_tensor.get_shape());
-        if(pad_top>0)
+        if(pad_top>0 || pad_bottom > 0)
         {
             auto pad_top_tensor = ttnn::pad(
+                DefaultQueueId,
                 sliced_input_tensor,
-                tt::tt_metal::Array4D({1, input_slice_height + pad_top, input_width, in_channels}),
-                tt::tt_metal::Array4D({0, 0, 0, 0}),
-                0);
+                std::vector<std::pair<uint32_t, uint32_t>>{{0, 0}, {pad_top, pad_bottom}, {0, 0}, {0, 0}},
+                0, true, std::nullopt);
             sliced_input_tensor = pad_top_tensor;
         }
         log_debug(tt::LogOp, "Padded sliced input tensor shape: {}", sliced_input_tensor.get_shape());
@@ -745,7 +745,7 @@ std::tuple<ttnn::Tensor, uint32_t, uint32_t, ttnn::Tensor, std::optional<ttnn::
             (std::optional<const ttnn::Tensor>)(bias_tensor_on_device),