From de2b60bda3574be80bee4f3151fd65bae3abc75b Mon Sep 17 00:00:00 2001 From: mouliraj-mcw Date: Sat, 7 Dec 2024 11:41:34 +0000 Subject: [PATCH] #15642: Update files --- .../eltwise/binary/device/binary_composite_op.cpp | 3 +-- .../eltwise/binary/device/binary_device_operation.cpp | 8 ++++---- ...adcast_height_and_width_multi_core_program_factory.cpp | 8 ++++---- .../operations/eltwise/unary_backward/unary_backward.cpp | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp index e064634da176..196314d10b29 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_composite_op.cpp @@ -327,9 +327,8 @@ Tensor ExecutePrelu::invoke( Tensor ExecutePrelu::invoke( const Tensor& input_a, const Tensor& input_b, const std::optional<MemoryConfig>& output_mem_config) { - const auto s_a = input_a.get_shape(); + const auto s_a = input_a.get_logical_shape(); const auto volume = input_b.get_logical_volume(); - TT_FATAL( s_a[1] == volume, "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = {} and channel size = {}.", diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp index 2026ab875514..ce524ac4ae6c 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary/device/binary_device_operation.cpp @@ -81,10 +81,10 @@ BinaryDeviceOperation::program_factory_t BinaryDeviceOperation::select_program_f } if (height_b == 1) { if (tensor_args.input_tensor_a.is_sharded()) { - if (tensor_args.input_tensor_a.get_logical_shape()[0] == - tensor_args.input_tensor_b->get_logical_shape()[0] || - tensor_args.input_tensor_a.get_logical_shape()[0] > 1 and - tensor_args.input_tensor_b->get_logical_shape()[0] == 1) { + if (tensor_args.input_tensor_a.get_padded_shape()[0] == + tensor_args.input_tensor_b->get_padded_shape()[0] || + tensor_args.input_tensor_a.get_padded_shape()[0] > 1 and + tensor_args.input_tensor_b->get_padded_shape()[0] == 1) { return BroadcastHeightMultiCoreShardedOptimized{}; } return BroadcastHeightMultiCoreSharded{}; diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary/device/broadcast_height_and_width_multi_core_program_factory.cpp b/ttnn/cpp/ttnn/operations/eltwise/binary/device/broadcast_height_and_width_multi_core_program_factory.cpp index 37552a8bb443..d54c9bdef6fe 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary/device/broadcast_height_and_width_multi_core_program_factory.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary/device/broadcast_height_and_width_multi_core_program_factory.cpp @@ -44,8 +44,8 @@ BinaryDeviceOperation::BroadcastHeightAndWidthMultiCore::create( const auto& b = tensor_args.input_tensor_b; auto& output = tensor_return_value; auto bcast_math = binary_op_type_to_bcast_op_math(operation_attributes.binary_op_type); - const auto ashape = a.get_logical_shape(); - const auto bshape = b.has_value() ? 
b->get_logical_shape() : Shape{1, 1}; + const auto ashape = a.get_padded_shape(); + const auto bshape = b.has_value() ? b->get_padded_shape() : Shape{1, 1}; uint32_t N = ashape.rank() >= 4 ? ashape[-4] : 1; uint32_t C = ashape.rank() >= 3 ? ashape[-3] : 1; uint32_t H = ashape[-2]; @@ -298,8 +298,8 @@ void BinaryDeviceOperation::BroadcastHeightAndWidthMultiCore::override_runtime_a auto dst_buffer = output_tensor.buffer(); - const auto ashape = input_tensor_a.get_logical_shape(); - const auto bshape = input_tensor_b.has_value() ? input_tensor_b->get_logical_shape() : Shape{1, 1}; + const auto ashape = input_tensor_a.get_padded_shape(); + const auto bshape = input_tensor_b.has_value() ? input_tensor_b->get_padded_shape() : Shape{1, 1}; uint32_t N = ashape.rank() >= 4 ? ashape[-4] : 1; uint32_t C = ashape.rank() >= 3 ? ashape[-3] : 1; uint32_t H = ashape[-2]; diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp index f012ecb14a18..21c5853eca81 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward.cpp @@ -1874,7 +1874,7 @@ std::vector<Tensor> ExecuteUnaryBackwardProd::invoke( // all_dimensions = False Tensor updated_grad = prod_result; auto step = ttnn::SmallVector({1, 1, 1, 1}); - if (prod_result.get_logical_shape() != grad.get_logical_shape()) { + if (prod_result.get_logical_shape() != grad.get_padded_shape()) { if (dim == 3 || dim == -1) { ttnn::SmallVector after_permute_dims = {0, 3, 1, 2}; Tensor required = ttnn::permute(grad, after_permute_dims, output_memory_config);