diff --git a/tt_metal/common/test_tiles.hpp b/tt_metal/common/test_tiles.hpp
index 4633e1a990bc..e828e07a7b72 100644
--- a/tt_metal/common/test_tiles.hpp
+++ b/tt_metal/common/test_tiles.hpp
@@ -348,7 +348,8 @@ inline std::vector convert_layout(
     ZoneScoped;
     TT_ASSERT(shape.size() >= 2, "Shape size {} must be at least rank 2!", shape.size());
-    uint32_t H = shape[shape.size() - 2], W = shape[shape.size() - 1];
+    uint32_t H = shape[shape.size() - 2];
+    uint32_t W = shape[shape.size() - 1];
     for (int i = 0; i < shape.size() - 2; i++) {
         H *= shape[i];
     }
diff --git a/ttnn/cpp/pybind11/pytensor.cpp b/ttnn/cpp/pybind11/pytensor.cpp
index 82eaabcca8aa..09de90a74cd6 100644
--- a/ttnn/cpp/pybind11/pytensor.cpp
+++ b/ttnn/cpp/pybind11/pytensor.cpp
@@ -18,6 +18,7 @@
 #include "ttnn/tensor/host_buffer/types.hpp"
 #include "ttnn/tensor/tensor.hpp"
 #include "ttnn/tensor/tensor_impl.hpp"
+#include "ttnn/tensor/tensor_ops.hpp"
 
 namespace py = pybind11;
 
@@ -107,8 +108,17 @@ Tensor convert_float_vector_to_tt_tensor(
         log_warning(tt::LogAlways, "Tensor layout must be Layout::TILE for bfloat8_b or bfloat4_b! Tensor layout will be {} instead of the requested {}!", Layout::TILE, layout);
     }
     auto owned_buffer = create_owned_buffer_from_vector_of_floats(std::move(data), DataType::FLOAT32);
-    auto float_tensor = Tensor(OwnedStorage{owned_buffer}, shape, DataType::FLOAT32, Layout::ROW_MAJOR, tile).to(Layout::TILE);
-    auto output_float_data = owned_buffer::get_as<float>(float_tensor).get();
+    auto float_tensor = Tensor(OwnedStorage{owned_buffer}, shape, DataType::FLOAT32, Layout::ROW_MAJOR, tile);
+    auto tile_val = tile.value_or(Tile());
+    if (shape[2] % tile_val.get_height() != 0 || shape[3] % tile_val.get_width() != 0) {
+        auto padded_shape = shape;
+        padded_shape[2] = tt::round_up(shape[2], tile_val.get_height());
+        padded_shape[3] = tt::round_up(shape[3], tile_val.get_width());
+
+        float_tensor = tensor_ops::tensor_pad(float_tensor, LegacyShape(shape, padded_shape), ttnn::SimpleShape{0, 0, 0, 0}, 0);
+    }
+    std::cout << float_tensor.shape() << std::endl;
+    auto output_float_data = owned_buffer::get_as<float>(float_tensor.to(Layout::TILE)).get();
     auto output_packed_data = data_type == DataType::BFLOAT8_B
         ? pack_fp32_vec_as_bfp8_tiles(output_float_data, /*row_major_input=*/false, /*is_exp_a=*/false, tile)
diff --git a/ttnn/cpp/ttnn/tensor/tensor_ops.cpp b/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
index bd06cf3370c7..09aa043e49fe 100644
--- a/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
+++ b/ttnn/cpp/ttnn/tensor/tensor_ops.cpp
@@ -237,7 +237,10 @@ Tensor tensor_pad(const Tensor& input_tensor, const tt::tt_metal::LegacyShape& o
     TT_ASSERT(
         input_tensor.storage_type() == StorageType::OWNED or
             input_tensor.storage_type() == StorageType::MULTI_DEVICE_HOST or
             input_tensor.storage_type() == StorageType::BORROWED && "Tensor must be on host for padding");
-    TT_ASSERT(input_tensor.get_layout() == Layout::ROW_MAJOR && "Tensor layout must be ROW_MAJOR for padding");
+    if (input_tensor.get_layout() != Layout::ROW_MAJOR) {
+        log_warning(tt::LogOp, "Tensor layout {} must be ROW_MAJOR for padding! Returning original tensor!", input_tensor.get_layout());
+        return input_tensor;
+    }
     auto input_shape = input_tensor.get_legacy_shape();
     auto dimensions_pads = std::vector();
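
Note on the pytensor.cpp hunk above: when the requested shape's last two dimensions are not multiples of the tile dimensions, the float tensor is padded up with tt::round_up before it is tiled and packed into bfloat8_b/bfloat4_b. The following is a minimal standalone sketch of that alignment check only; the round_up_to helper, the 32x32 tile size, and the example shape are assumptions for illustration, not the actual tt::round_up/Tile API.

    #include <array>
    #include <cstdint>
    #include <iostream>

    // Round `value` up to the nearest multiple of `multiple`
    // (mirrors what tt::round_up is used for in the diff).
    static uint32_t round_up_to(uint32_t value, uint32_t multiple) {
        return ((value + multiple - 1) / multiple) * multiple;
    }

    int main() {
        // Hypothetical 4-D shape [N, C, H, W]; 32x32 is the default tile size assumed here.
        std::array<uint32_t, 4> shape{1, 1, 30, 50};
        const uint32_t tile_h = 32;
        const uint32_t tile_w = 32;

        // Same condition as the new code in convert_float_vector_to_tt_tensor:
        // pad only when H or W is not already tile-aligned.
        if (shape[2] % tile_h != 0 || shape[3] % tile_w != 0) {
            std::array<uint32_t, 4> padded_shape = shape;
            padded_shape[2] = round_up_to(shape[2], tile_h);
            padded_shape[3] = round_up_to(shape[3], tile_w);
            std::cout << "padded H x W: " << padded_shape[2] << " x " << padded_shape[3] << "\n";  // 32 x 64
        }
        return 0;
    }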