#13127: Remove shape_without_padding() pybinding and usage
TT-BrianLiu committed Oct 2, 2024
1 parent 3478414 commit 9ddd6e1
Showing 9 changed files with 24 additions and 34 deletions.
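
The change is mechanical across the nine files below: every call site that went through the removed pybinding now reads the tensor's shape property, which the updated code (for example the test_run_downsample assertions) treats as the logical, unpadded shape, while shape.with_tile_padding() continues to give the tile-padded shape. A minimal before/after sketch of the pattern (here tensor is a placeholder, not a name taken from this commit):

    # Before this commit: the unpadded shape came from the pybind11 method removed here.
    unpadded_shape = tensor.shape_without_padding()

    # After this commit: read the shape property directly; it reports the logical
    # (unpadded) shape, and tensor.shape.with_tile_padding() still returns the padded shape.
    unpadded_shape = tensor.shape
    desired_shape = list(tensor.shape)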
@@ -703,7 +703,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
print(f"=================================== layer: 4, module: 3")
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -796,7 +796,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -934,7 +934,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
print(f"=================================== layer: 4, module: 3")
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -1028,7 +1028,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -1023,7 +1023,7 @@ def run(self, input_tensor, device, ops_parallel_config, conv_op_cache={}) -> tt
)

if is_wormhole_b0() and self.batch_size == 16:
-xshape = x.shape_without_padding()
+xshape = x.shape
x = ttnn.slice(x, [0, 0, 0, 0], [xshape[0], xshape[1], xshape[2], xshape[3]])

layer4_module1_input_shape = ttnn.Shape(x.shape.with_tile_padding())
@@ -1119,7 +1119,7 @@ def run(self, input_tensor, device, ops_parallel_config, conv_op_cache={}) -> tt
)
x = ttnn.to_memory_config(x, width_sharded_mem_config)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -1195,7 +1195,7 @@ def run(self, input_tensor, device, ops_parallel_config, conv_op_cache={}) -> tt
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -699,7 +699,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
print(f"=================================== layer: 4, module: 3")
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -792,7 +792,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -916,7 +916,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
x, x_height, x_width = self.layer4_module2(x, device, batch_size, x_height, x_width, conv_op_cache)
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -1010,7 +1010,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -686,7 +686,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
eltwise_binary_out_in_place=True,
)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.slice(
x,
(0, 0, 0, 0),
@@ -728,7 +728,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
print(f"=================================== layer: 4, module: 3")
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -821,7 +821,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -941,7 +941,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
x, x_height, x_width = self.layer4_module2(x, device, batch_size, x_height, x_width, conv_op_cache)
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -1035,7 +1035,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -787,7 +787,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
print(f"=================================== layer: 4, module: 3")
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -882,7 +882,7 @@ def first_run(self, input_tensor, device, batch_size, ops_parallel_config) -> tt
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
@@ -1016,7 +1016,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
x, x_height, x_width = self.layer4_module2(x, device, batch_size, x_height, x_width, conv_op_cache)
x, x_height, x_width = self.layer4_module3(x, device, batch_size, x_height, x_width, conv_op_cache)

-unpadded_shape = x.shape_without_padding()
+unpadded_shape = x.shape
x = ttnn.untilize_with_unpadding(
x,
output_tensor_end=(
@@ -1112,7 +1112,7 @@ def optimized_run(self, input_tensor, device, batch_size, ops_parallel_config, c
)

x = self.fc(x)
-desired_shape = list(x.shape_without_padding())
+desired_shape = list(x.shape)
desired_shape[-1] = 1000
x = ttnn.untilize_with_unpadding(
x,
2 changes: 1 addition & 1 deletion models/experimental/vgg/vgg_utils.py
@@ -22,7 +22,7 @@ def format_tensor(x, target_layout, device, output_mem_config, pad_value=0.0):
else:
return ttnn.tilize(x, memory_config=output_mem_config, use_multicore=True)
elif x.get_layout() == ttnn.TILE_LAYOUT and target_layout == ttnn.ROW_MAJOR_LAYOUT:
-if x.shape.with_tile_padding() != x.shape_without_padding():
+if x.shape.with_tile_padding() != x.shape:
return ttnn.format_output_tensor(x, x.shape_without_padding(), device, target_layout, output_mem_config)
else:
return ttnn.untilize(x, memory_config=output_mem_config, use_multicore=True)
@@ -147,8 +147,8 @@ def test_run_downsample(
out_shape = [1, 1, _nearest_y(batch_size * output_height * output_width, 32), input_channels]
assert out_shape == list(out.shape.with_tile_padding())
out_shape_unpadded = [1, 1, batch_size * output_height * output_width, input_channels]
-assert out_shape_unpadded == list(out.shape_without_padding())
-out = ttnn.format_output_tensor(out, out.shape_without_padding(), device, ttnn.ROW_MAJOR_LAYOUT)
+assert out_shape_unpadded == list(out.shape)
+out = ttnn.format_output_tensor(out, out.shape, device, ttnn.ROW_MAJOR_LAYOUT)
out = out.cpu()

out_debug = out
10 changes: 0 additions & 10 deletions ttnn/cpp/pybind11/pytensor.cpp
@@ -1591,16 +1591,6 @@ void pytensor_module(py::module &m_tensor) {
dtype = tt_tensor.get_dtype()
)doc")
-.def(
-"shape_without_padding",
-[](const Tensor &self) { return Shape{self.get_legacy_shape().without_padding()}; },
-R"doc(
-Get shape without padding of TT Tensor.
-.. code-block:: python
-dtype = tt_tensor.shape_without_padding()
-)doc")
.def(
"reshape",
[](Tensor &self, int N, int C, int H, int W) { return self.reshape(N, C, H, W); },
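
For reference, the binding deleted above wrapped the legacy shape with its padding stripped (Shape{self.get_legacy_shape().without_padding()}); on the Python side the same information now comes from the shape property. A rough equivalence, with tt_tensor as a stand-in for illustration only:

    logical_shape = tt_tensor.shape                     # replaces tt_tensor.shape_without_padding()
    padded_shape = tt_tensor.shape.with_tile_padding()  # tile-padded shape, unchanged by this commit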
2 changes: 1 addition & 1 deletion ttnn/tt_lib/fused_ops/max_pool.py
@@ -28,7 +28,7 @@ def run_max_pool_on_device_wrapper(
reshape_2d=False,
):
def max_pool_2d(x):
-# x_shape_nopad = x.shape_without_padding()
+# x_shape_nopad = x.shape
# out_shape_nopad = compute_max_pool_shape(kernel_size, stride, padding, x_shape_nopad)
# if reshape_2d and channels_last:
# x = x.reshape(x_shape_nopad[0], 1, x_shape_nopad[1] * x_shape_nopad[2], x_shape_nopad[3])
