diff --git a/tests/ttnn/unit_tests/operations/test_pad.py b/tests/ttnn/unit_tests/operations/test_pad.py
index fa1373d59fe..0dc9aa18ef1 100644
--- a/tests/ttnn/unit_tests/operations/test_pad.py
+++ b/tests/ttnn/unit_tests/operations/test_pad.py
@@ -18,7 +18,11 @@
 @pytest.mark.parametrize("w", [224])
 @pytest.mark.parametrize(
     "padding,torch_padding",
-    [(((0, 1), (3, 25), (32, 32)), (32, 32, 3, 25, 0, 1)), (((0, 1), (3, 25), (4, 6)), (4, 6, 3, 25, 0, 1))],
+    [
+        (((0, 1), (3, 25), (32, 32)), (32, 32, 3, 25, 0, 1)),
+        (((0, 1), (3, 25), (4, 6)), (4, 6, 3, 25, 0, 1)),
+        (((0, 1), (3, 25), (4, 7)), (4, 7, 3, 25, 0, 1)),  # Odd back-padding (7): output W = 224 + 4 + 7 = 235 (odd)
+    ],
 )
 @pytest.mark.parametrize("value", [0])
 def test_pad_rm(device, n, c, h, w, padding, torch_padding, value):
diff --git a/ttnn/cpp/ttnn/operations/data_movement/pad/device/pad_op.cpp b/ttnn/cpp/ttnn/operations/data_movement/pad/device/pad_op.cpp
index 0dbbf5fc4ae..ef91a8bd377 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/pad/device/pad_op.cpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/pad/device/pad_op.cpp
@@ -45,7 +45,6 @@ void Pad::validate_with_output_tensors(
             input_tensor.get_dtype() == DataType::FLOAT32 || input_tensor.get_dtype() == DataType::BFLOAT16,
             "Cannot pad tilized tensor with specified format");
     } else if (input_tensor.get_layout() == Layout::ROW_MAJOR) {
-        TT_FATAL(this->output_tensor_shape[3] % 2 == 0, "RM padding requires output X dim to be a multiple of 2");
         TT_FATAL(
             input_tensor.get_dtype() == DataType::FLOAT32 || input_tensor.get_dtype() == DataType::BFLOAT16,
             "Cannot pad RM tensor with specified format");