Skip to content

Commit

Permalink
#0: Automatically add padding for bfloat8_b/bfloat4_b for float vector tensor creation
Browse files Browse the repository at this point in the history

- Emit a warning and return the input tensor instead of asserting in the pad op
  • Loading branch information
TT-BrianLiu committed Nov 26, 2024
1 parent 9115484 commit da4aab2
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 4 deletions.
3 changes: 2 additions & 1 deletion tt_metal/common/test_tiles.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -348,7 +348,8 @@ inline std::vector<T> convert_layout(
ZoneScoped;

TT_ASSERT(shape.size() >= 2, "Shape size {} must be at least rank 2!", shape.size());
uint32_t H = shape[shape.size() - 2], W = shape[shape.size() - 1];
uint32_t H = shape[shape.size() - 2];
uint32_t W = shape[shape.size() - 1];
for (int i = 0; i < shape.size() - 2; i++) {
H *= shape[i];
}
Expand Down
14 changes: 12 additions & 2 deletions ttnn/cpp/pybind11/pytensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "ttnn/tensor/host_buffer/types.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/tensor_impl.hpp"
#include "ttnn/tensor/tensor_ops.hpp"

namespace py = pybind11;

Expand Down Expand Up @@ -107,8 +108,17 @@ Tensor convert_float_vector_to_tt_tensor(
log_warning(tt::LogAlways, "Tensor layout must be Layout::TILE for bfloat8_b or bfloat4_b! Tensor layout will be {} instead of the requested {}!", Layout::TILE, layout);
}
auto owned_buffer = create_owned_buffer_from_vector_of_floats(std::move(data), DataType::FLOAT32);
auto float_tensor = Tensor(OwnedStorage{owned_buffer}, shape, DataType::FLOAT32, Layout::ROW_MAJOR, tile).to(Layout::TILE);
auto output_float_data = owned_buffer::get_as<float>(float_tensor).get();
auto float_tensor = Tensor(OwnedStorage{owned_buffer}, shape, DataType::FLOAT32, Layout::ROW_MAJOR, tile);
auto tile_val = tile.value_or(Tile());
if (shape[2] % tile_val.get_height() != 0 || shape[3] % tile_val.get_width() != 0) {
auto padded_shape = shape;
padded_shape[2] = tt::round_up(shape[2], tile_val.get_height());
padded_shape[3] = tt::round_up(shape[3], tile_val.get_width());

float_tensor = tensor_ops::tensor_pad(float_tensor, LegacyShape(shape, padded_shape), ttnn::SimpleShape{0, 0, 0, 0}, 0);
}
std::cout << float_tensor.shape() << std::endl;
auto output_float_data = owned_buffer::get_as<float>(float_tensor.to(Layout::TILE)).get();
auto output_packed_data =
data_type == DataType::BFLOAT8_B
? pack_fp32_vec_as_bfp8_tiles(output_float_data, /*row_major_input=*/false, /*is_exp_a=*/false, tile)
Expand Down
5 changes: 4 additions & 1 deletion ttnn/cpp/ttnn/tensor/tensor_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,10 @@ Tensor tensor_pad(const Tensor& input_tensor, const tt::tt_metal::LegacyShape& o
TT_ASSERT(
input_tensor.storage_type() == StorageType::OWNED or input_tensor.storage_type() == StorageType::MULTI_DEVICE_HOST or
input_tensor.storage_type() == StorageType::BORROWED && "Tensor must be on host for padding");
TT_ASSERT(input_tensor.get_layout() == Layout::ROW_MAJOR && "Tensor layout must be ROW_MAJOR for padding");
if (input_tensor.get_layout() != Layout::ROW_MAJOR) {
log_warning(tt::LogOp, "Tensor layout {} must be ROW_MAJOR for padding! Returning original tensor!", input_tensor.get_layout());
return input_tensor;
}

auto input_shape = input_tensor.get_legacy_shape();
auto dimensions_pads = std::vector<Padding::PadDimension>();
Expand Down

0 comments on commit da4aab2

Please sign in to comment.