diff --git a/ttnn/cpp/ttnn/operations/binary.hpp b/ttnn/cpp/ttnn/operations/binary.hpp
index 8c22db4bbc4..85da18142da 100644
--- a/ttnn/cpp/ttnn/operations/binary.hpp
+++ b/ttnn/cpp/ttnn/operations/binary.hpp
@@ -47,7 +47,7 @@ inline ttnn::Tensor add(
 
     if (height_b == 1 or width_b == 1) {
         if (dtype.has_value()) {
-            TT_THROW("Bcast: Unsupported dtype argument.");
+            TT_THROW("ttnn.add: cannot change dtype when broadcasting");
         }
         tt::tt_metal::BcastOpDim bcast_op_dim;
         if (height_b == 1 and width_b == 1) {
diff --git a/ttnn/ttnn/operations/core.py b/ttnn/ttnn/operations/core.py
index befd945f8e3..25f2039da9d 100644
--- a/ttnn/ttnn/operations/core.py
+++ b/ttnn/ttnn/operations/core.py
@@ -814,7 +814,8 @@ def clone(tensor, memory_config: ttnn.MemoryConfig, dtype: ttnn.DataType):
     In the case where the layout is the same, the operation simply pad or unpad the last two dimensions depending on layout requested.
     Args:
         * :attr:`tensor`: the ttnn.Tensor
-        * :attr:`layout`: the layout of either ttnn.ROW_MAJOR_LAYOUT or ttnn.TILE_LAYOUT.
+        * :attr:`memory_config`: the `ttnn` memory config, DRAM_MEMORY_CONFIG or L1_MEMORY_CONFIG.
+        * :attr:`dtype`: the `ttnn` data type.
     Example::
         >>> tensor = ttnn.to_device(ttnn.from_torch(torch.zeros((1, 1, 64, 32), dtype=torch.bfloat16, layout=ttnn.TILE_LAYOUT)), device, memory_config=ttnn.DRAM_MEMORY_CONFIG)
         >>> output = ttnn.clone(tensor, tnn.DRAM_MEMORY_CONFIG, tnn.bfloat8_b)
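
The first hunk only rewords an error message, but the behavior it guards is worth spelling out: when the second operand has height 1 or width 1, `ttnn::add` falls back to the internal bcast (broadcast) kernel, and that path cannot also cast the output, so any `dtype` argument is rejected. Below is a minimal repro sketch of the user-facing behavior, assuming `device` is an already-open ttnn device handle and that the Python binding exposes the optional `dtype` parameter visible in the C++ signature.

```python
import torch
import ttnn

# Assumption: `device` was opened earlier, e.g. via ttnn.open_device(device_id=0).
a = ttnn.to_device(
    ttnn.from_torch(torch.rand(1, 1, 64, 32, dtype=torch.bfloat16), layout=ttnn.TILE_LAYOUT),
    device,
)
# Width 1 on the second operand routes the add through the bcast kernel.
b = ttnn.to_device(
    ttnn.from_torch(torch.rand(1, 1, 64, 1, dtype=torch.bfloat16), layout=ttnn.TILE_LAYOUT),
    device,
)

out = ttnn.add(a, b)  # broadcasting add without a dtype override: fine

try:
    # Requesting an output dtype on the broadcast path raises; after this
    # change the message names the op instead of the internal bcast kernel.
    out = ttnn.add(a, b, dtype=ttnn.bfloat8_b)
except Exception as exc:
    print(exc)  # "ttnn.add: cannot change dtype when broadcasting"
```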
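
The second hunk fixes the `clone` docstring: the function takes `memory_config` and `dtype`, not the `layout` argument the old text described. For reference, a short usage sketch matching the corrected Args, again assuming an open `device`. (The docstring's own example still misspells the module prefix as `tnn.` and passes `layout=ttnn.TILE_LAYOUT` to `torch.zeros` rather than to `ttnn.from_torch`; the sketch below corrects both, which a follow-up to the docstring could also do.)

```python
import torch
import ttnn

# Assumption: `device` was opened earlier, e.g. via ttnn.open_device(device_id=0).
tensor = ttnn.to_device(
    ttnn.from_torch(torch.zeros(1, 1, 64, 32, dtype=torch.bfloat16), layout=ttnn.TILE_LAYOUT),
    device,
    memory_config=ttnn.DRAM_MEMORY_CONFIG,
)

# clone(tensor, memory_config, dtype): copies the tensor into the requested
# memory space (DRAM here) while casting it to the requested dtype.
output = ttnn.clone(tensor, ttnn.DRAM_MEMORY_CONFIG, ttnn.bfloat8_b)
```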