From 20bf440206573a8f26ff46fc047ae636b2356c16 Mon Sep 17 00:00:00 2001 From: ngrujic Date: Tue, 10 Dec 2024 12:08:55 +0000 Subject: [PATCH] #15827: Add cpp unit test to showcase block sharding problems --- .../wormhole/test_eltwise_block_shard_spec.py | 18 ++++---- .../tensor/test_sharding_with_alignment.cpp | 45 +++++++++++++++++++ 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/tests/ttnn/python_api_testing/non_working_unit_tests/wormhole/test_eltwise_block_shard_spec.py b/tests/ttnn/python_api_testing/non_working_unit_tests/wormhole/test_eltwise_block_shard_spec.py index f73eb144f0b..f377c9a14a1 100644 --- a/tests/ttnn/python_api_testing/non_working_unit_tests/wormhole/test_eltwise_block_shard_spec.py +++ b/tests/ttnn/python_api_testing/non_working_unit_tests/wormhole/test_eltwise_block_shard_spec.py @@ -69,7 +69,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [320, 192], # shard shape ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ( (256, 2, 5, 1536), @@ -80,7 +80,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [320, 192], ttnn.ShardOrientation.ROW_MAJOR, - 0, # halo + False, # halo ), ( (256, 2, 5, 1536), @@ -91,7 +91,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [320, 192], ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ( (1, 256, 2, 2304), @@ -102,7 +102,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [64, 288], ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ( (1, 256, 2, 2304), @@ -113,7 +113,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [64, 288], ttnn.ShardOrientation.ROW_MAJOR, - 0, # halo + False, # halo ), ( (1, 256, 2, 2304), @@ -124,7 +124,7 @@ def run_tests( 
ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [64, 288], ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ( (32, 4, 8, 768), @@ -135,7 +135,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [128, 96], ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ( (32, 4, 8, 768), @@ -146,7 +146,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [128, 96], ttnn.ShardOrientation.ROW_MAJOR, - 0, # halo + False, # halo ), ( (32, 4, 8, 768), @@ -157,7 +157,7 @@ def run_tests( ttnn.CoreRangeSet({ttnn.CoreRange(ttnn.CoreCoord(0, 0), ttnn.CoreCoord(7, 7))}), # core grid [128, 96], ttnn.ShardOrientation.COL_MAJOR, - 0, # halo + False, # halo ), ] diff --git a/tests/ttnn/unit_tests/gtests/tensor/test_sharding_with_alignment.cpp b/tests/ttnn/unit_tests/gtests/tensor/test_sharding_with_alignment.cpp index 520d851700e..32b74c866af 100644 --- a/tests/ttnn/unit_tests/gtests/tensor/test_sharding_with_alignment.cpp +++ b/tests/ttnn/unit_tests/gtests/tensor/test_sharding_with_alignment.cpp @@ -937,6 +937,51 @@ INSTANTIATE_TEST_SUITE_P( CreateShardedTensorWithAlignmentExpected{ .physical_size = Size{28, 9} } + }, + //////////////////////////////////////////////////////////////////// + // EXAMPLE 4: Some of block sharding failures + //////////////////////////////////////////////////////////////////// + CreateShardedTensorWithAlignmentParams{ + CreateShardedTensorWithAlignmentInputs{ + .shape = SimpleShape{32, 4, 8, 768}, + .data_type = DataType::BFLOAT16, + .page_config = PageConfig(Layout::TILE), + .memory_config = + MemoryConfig{ + .memory_layout = TensorMemoryLayout::BLOCK_SHARDED, + .buffer_type = BufferType::L1, + .shard_spec = ShardSpec{ + num_cores_to_corerangeset(64, CoreCoord{8, 8}, /*row_wise=*/true), // tt::div_up(32 * 4 * 8, 128) * tt::div_up(768, 96) + {128, 96}, + 
ShardOrientation::ROW_MAJOR, + false, + ShardMode::PHYSICAL} + } + }, + CreateShardedTensorWithAlignmentExpected{ + .physical_size = Size{1024, 768} + } + }, + CreateShardedTensorWithAlignmentParams{ + CreateShardedTensorWithAlignmentInputs{ + .shape = SimpleShape{32, 4, 8, 768}, + .data_type = DataType::BFLOAT16, + .page_config = PageConfig(Layout::TILE), + .memory_config = + MemoryConfig{ + .memory_layout = TensorMemoryLayout::BLOCK_SHARDED, + .buffer_type = BufferType::L1, + .shard_spec = ShardSpec{ + num_cores_to_corerangeset(64, CoreCoord{8, 8}, /*row_wise=*/true), // tt::div_up(32 * 4 * 8, 128) * tt::div_up(768, 96) + {128, 96}, + ShardOrientation::COL_MAJOR, + false, + ShardMode::PHYSICAL} + } + }, + CreateShardedTensorWithAlignmentExpected{ + .physical_size = Size{1024, 768} + } } ) // Values // clang-format on