diff --git a/tests/ttnn/unit_tests/operations/test_conv1d.py b/tests/ttnn/unit_tests/operations/test_conv1d.py
index 7013ef6b2db..d5f08d602bb 100644
--- a/tests/ttnn/unit_tests/operations/test_conv1d.py
+++ b/tests/ttnn/unit_tests/operations/test_conv1d.py
@@ -129,6 +129,15 @@ def run_conv(
     )
 
     tt_output_tensor = ttnn.from_device(tt_output_tensor_on_device)
+    tt_output_tensor = ttnn.reshape(
+        tt_output_tensor,
+        [
+            1,
+            1,
+            batch_size * out_length,
+            output_channels,
+        ],
+    )
     torch_output_tensor = torch.Tensor(ttnn.to_torch(tt_output_tensor))
 
     # torch_output_tensor is in row major layout and NLC shape
diff --git a/tests/ttnn/unit_tests/operations/test_new_conv2d.py b/tests/ttnn/unit_tests/operations/test_new_conv2d.py
index 37b25e6291b..eb9256cc5bf 100644
--- a/tests/ttnn/unit_tests/operations/test_new_conv2d.py
+++ b/tests/ttnn/unit_tests/operations/test_new_conv2d.py
@@ -44,13 +44,14 @@ def _nearest_32(x):
 def write_to_file(file_name, data):
     data = data.cpu().numpy()
     with open(file_name, "w") as f:
-        for i in range(1):
+        for i in range(data.shape[0]):
             for j in range(data.shape[2]):
                 for k in range(data.shape[3]):
                     for l in range(data.shape[1]):
                         f.write(str(data[i][l][j][k]) + " ")
                     f.write("\n")
                 f.write("\n")
+            f.write("\n")
 
 
 def write_to_file_special(file_name, data):
@@ -59,7 +60,7 @@ def write_to_file_special(file_name, data):
         for i in range(data.shape[0]):
             for j in range(data.shape[1]):
                 for k in range(data.shape[2]):
-                    for l in range(16):
+                    for l in range(data.shape[3]):
                         f.write(str(data[i][j][k][l]) + " ")
                     f.write("\n")
 
@@ -248,6 +249,8 @@ def run_conv(
     else:
         pcc = 0.997
 
+    # write_to_file("golden_tensor.txt", torch_out_golden_tensor.float())
+    # write_to_file("output_tensor_1.txt", torch_output_tensor.float())
     passing, pcc_msg = check_with_pcc_without_tensor_printout(torch_output_tensor, torch_out_golden_tensor, pcc=pcc)
     logger.info(f"PCC = {pcc_msg}. Threshold = {pcc}")
     assert passing
diff --git a/ttnn/cpp/ttnn/operations/conv/conv2d/device/conv2d_op.cpp b/ttnn/cpp/ttnn/operations/conv/conv2d/device/conv2d_op.cpp
index b72912b82db..60f4f47b725 100644
--- a/ttnn/cpp/ttnn/operations/conv/conv2d/device/conv2d_op.cpp
+++ b/ttnn/cpp/ttnn/operations/conv/conv2d/device/conv2d_op.cpp
@@ -217,6 +217,9 @@ std::vector OptimizedConvNew::compute_output_specs(const std::vector
     auto output_padding = Padding(
         {{0, 0}, {0, 0}, {0, 0}, {0, (padded_shape_c - shape_c)}}, Padding::PadValue::Zero);
     auto output_shape = tt::tt_metal::LegacyShape({batch_size, conv_output_h, conv_output_w, padded_shape_c}, output_padding);
+    if(conv_output_w == 1){
+        output_shape = tt::tt_metal::LegacyShape({batch_size, conv_output_w, conv_output_h, padded_shape_c}, output_padding); //handling conv1d transpose.
+    }
     auto output_layout = this->untilize_out ? Layout::ROW_MAJOR : Layout::TILE;
 
     if (this->memory_config.is_sharded()) {
diff --git a/ttnn/cpp/ttnn/tensor/tensor_impl.cpp b/ttnn/cpp/ttnn/tensor/tensor_impl.cpp
index f1f02c13bb4..987dcd55b85 100644
--- a/ttnn/cpp/ttnn/tensor/tensor_impl.cpp
+++ b/ttnn/cpp/ttnn/tensor/tensor_impl.cpp
@@ -18,7 +18,7 @@ namespace tt_metal {
 
 namespace tensor_impl {
 
-TensorPrintProfile TTNN_TENSOR_PRINT_PROFILE = TensorPrintProfile::Full;
+TensorPrintProfile TTNN_TENSOR_PRINT_PROFILE = TensorPrintProfile::Short;
 
 std::ostream& operator<<(std::ostream& os, const DataType& dtype) {
     switch (dtype) {