From 4b201bc922bc89522c15ba171e82bd77e54b7b62 Mon Sep 17 00:00:00 2001
From: Shwetank Singh <ssingh@tenstorrent.com>
Date: Mon, 23 Dec 2024 08:55:56 +0000
Subject: [PATCH] #0: remove leftover debug prints from conv2d weight preparation

---
 .../tt/ttnn_functional_resnet50_new_conv_api.py      |  3 ---
 .../functional_unet/tt/unet_shallow_ttnn.py          |  1 -
 .../conv/conv2d/prepare_conv2d_weights.cpp           | 12 ++++++------
 3 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/models/demos/ttnn_resnet/tt/ttnn_functional_resnet50_new_conv_api.py b/models/demos/ttnn_resnet/tt/ttnn_functional_resnet50_new_conv_api.py
index 7a2f400d4377..2751a5cfabb2 100644
--- a/models/demos/ttnn_resnet/tt/ttnn_functional_resnet50_new_conv_api.py
+++ b/models/demos/ttnn_resnet/tt/ttnn_functional_resnet50_new_conv_api.py
@@ -258,7 +258,6 @@ def __call__(
         }
 
         if not ttnn.is_tensor_storage_on_device(self.conv1_weight_tensor):
-            print("preparing conv1 weights")
             self.conv1_weight_tensor = ttnn.prepare_conv_weights(
                 weight_tensor=self.conv1_weight_tensor,
                 weights_format="OIHW",
@@ -384,7 +383,6 @@ def __call__(
         }
 
         if not ttnn.is_tensor_storage_on_device(self.conv2_weight_tensor):
-            print("Preparing conv2 weights")
             self.conv2_weight_tensor = ttnn.prepare_conv_weights(
                 weight_tensor=self.conv2_weight_tensor,
                 weights_format="OIHW",
@@ -465,7 +463,6 @@ def __call__(
         }
 
         if not ttnn.is_tensor_storage_on_device(self.conv3_weight_tensor):
-            print("Preparing conv3 weights")
             self.conv3_weight_tensor = ttnn.prepare_conv_weights(
                 weight_tensor=self.conv3_weight_tensor,
                 weights_format="OIHW",
diff --git a/models/experimental/functional_unet/tt/unet_shallow_ttnn.py b/models/experimental/functional_unet/tt/unet_shallow_ttnn.py
index bf6903dd75b7..ed34523c15ec 100644
--- a/models/experimental/functional_unet/tt/unet_shallow_ttnn.py
+++ b/models/experimental/functional_unet/tt/unet_shallow_ttnn.py
@@ -147,7 +147,6 @@ def __init__(
         self.bias = ttnn.from_torch(bias, dtype=ttnn.float32, mesh_mapper=mesh_mapper)
 
     def __call__(self, x):
-        print(ttnn.get_memory_config(x))
         conv_kwargs = {
             "input_layout": x.get_layout(),
             "in_channels": self.in_channels,
diff --git a/ttnn/cpp/ttnn/operations/conv/conv2d/prepare_conv2d_weights.cpp b/ttnn/cpp/ttnn/operations/conv/conv2d/prepare_conv2d_weights.cpp
index 07eeb19b254c..9aba17b48ce0 100644
--- a/ttnn/cpp/ttnn/operations/conv/conv2d/prepare_conv2d_weights.cpp
+++ b/ttnn/cpp/ttnn/operations/conv/conv2d/prepare_conv2d_weights.cpp
@@ -188,8 +188,8 @@ std::pair<ttnn::Tensor, std::optional<ttnn::Tensor>> prepare_conv_weights_biases
     const bool parameters_on_device,
     bool is_non_tile_mul_width) {
 
-    std::cout << "pcwbmtd " << input_channels_alignment << " " << weight_block_h_ntiles << " " << weight_block_w_ntiles << " " << groups << " " << act_block_h_ntiles << " " << input_width << " " << is_non_tile_mul_width << std::endl;
-    std::cout << "parallel config" << (int)parallel_config.shard_scheme << " " << (int)parallel_config.shard_orientation << std::endl;
+    // std::cout << "pcwbmtd " << input_channels_alignment << " " << weight_block_h_ntiles << " " << weight_block_w_ntiles << " " << groups << " " << act_block_h_ntiles << " " << input_width << " " << is_non_tile_mul_width << std::endl;
+    // std::cout << "parallel config" << (int)parallel_config.shard_scheme << " " << (int)parallel_config.shard_orientation << std::endl;
 
     validate_weight_tensor(weight_tensor);
     ttnn::Tensor weight_tensor_;  // tensor to return
@@ -226,7 +226,7 @@ std::pair<ttnn::Tensor, std::optional<ttnn::Tensor>> prepare_conv_weights_biases
     uint32_t window_h = weights_shape[2];
     uint32_t window_w = weights_shape[3];
 
-    std::cout << "for bias -> " << out_channels << std::endl;
+    // std::cout << "for bias -> " << out_channels << std::endl;
 
     uint32_t num_cores_channels = get_num_cores_channels_from_parallel_config(parallel_config);
     uint32_t out_channels_padded = tt::round_up(out_channels, num_cores_channels * tt::constants::TILE_WIDTH);
@@ -313,8 +313,8 @@ ttnn::Tensor prepare_conv_weights(
     T *device,
     const std::optional<const Conv2dConfig>& conv_config_,
     const std::optional<const DeviceComputeKernelConfig>& compute_config_) {
-    std::cout << "prepare conv weight" << std::endl;
-    std::cout << "input_memory_config -> "  << input_memory_config << std::endl;
+    // std::cout << "prepare conv weight" << std::endl;
+    // std::cout << "input_memory_config -> "  << input_memory_config << std::endl;
     TT_FATAL(!ttnn::is_tensor_on_device_or_multidevice(weight_tensor), "Error: weight tensor must be on host for preparation.");
     Conv2dConfig conv_config = conv_config_.value_or(Conv2dConfig());
     DeviceComputeKernelConfig compute_config = compute_config_.value_or(init_device_compute_kernel_config(
@@ -407,7 +407,7 @@ ttnn::Tensor prepare_conv_bias(
 
     TT_FATAL(!ttnn::is_tensor_on_device_or_multidevice(bias_tensor), "Error: bias tensor must be on host for preparation.");
 
-    std::cout << "prepare conv bias" << std::endl;
+    // std::cout << "prepare conv bias" << std::endl;
 
     const bool mm_conv = use_matmul_for_1x1_conv(kernel_size, stride, padding, dilation, groups);
     const uint32_t output_height = ((input_height - kernel_size[0] - ((kernel_size[0] - 1 ) * (dilation[0] - 1)) + 2 * padding[0]) / stride[0]) + 1;