From b2f4ce95d5b7227d6a52ee5d89cccf2090309a83 Mon Sep 17 00:00:00 2001
From: Borys Bradel
Date: Fri, 10 May 2024 22:50:22 +0000
Subject: [PATCH] #7511: Update matmul related tests to not use removed 1d
 program config

---
 .../ttnn/unit_tests/operations/test_linear.py | 34 -------------------
 .../ttnn/unit_tests/operations/test_matmul.py | 31 -----------------
 tests/ttnn/unit_tests/test_multi_device.py    |  4 +--
 3 files changed, 2 insertions(+), 67 deletions(-)

diff --git a/tests/ttnn/unit_tests/operations/test_linear.py b/tests/ttnn/unit_tests/operations/test_linear.py
index b28e0e1b7c7a..7195ba7f4ad8 100644
--- a/tests/ttnn/unit_tests/operations/test_linear.py
+++ b/tests/ttnn/unit_tests/operations/test_linear.py
@@ -171,40 +171,6 @@ def test_wide_linear_with_argument_for_using_1D_systolic_array_set_to_true(
     assert_with_pcc(torch_output_tensor, output_tensor, 0.997)
 
 
-@pytest.mark.parametrize("batch_size", [1, 8])
-@pytest.mark.parametrize("m_size", [32, 64])
-@pytest.mark.parametrize("k_size", [1024, 2048])
-@pytest.mark.parametrize("n_size", [1024, 2048])
-@pytest.mark.parametrize("activation", [None, "relu"])
-def test_linear_by_passing_in_1D_systolic_array_program_config(device, batch_size, m_size, k_size, n_size, activation):
-    torch.manual_seed(0)
-
-    torch_input_tensor_a = torch.randn((batch_size, m_size, k_size), dtype=torch.bfloat16)
-    torch_input_tensor_b = torch.randn((k_size, n_size), dtype=torch.bfloat16)
-    torch_output_tensor = torch_input_tensor_a @ torch_input_tensor_b
-    if activation == "relu":
-        torch_output_tensor = torch.relu(torch_output_tensor)
-
-    input_tensor_a = ttnn.from_torch(torch_input_tensor_a, layout=ttnn.TILE_LAYOUT, device=device)
-    input_tensor_b = ttnn.from_torch(torch_input_tensor_b, layout=ttnn.TILE_LAYOUT, device=device)
-
-    program_config = ttnn.create_matmul_1d_systolic_array_program_config(
-        input_shape_a=input_tensor_a.shape,
-        input_shape_b=input_tensor_b.shape,
-        core_grid=device.core_grid,
-        activation=activation,
-    )
-
-    output_tensor = ttnn.linear(
-        input_tensor_a,
-        input_tensor_b,
-        program_config=program_config,
-    )
-
-    output_tensor = ttnn.to_torch(output_tensor)
-    assert_with_pcc(torch_output_tensor, output_tensor, 0.997)
-
-
 @pytest.mark.parametrize("m_size", [32, 512])
 @pytest.mark.parametrize("k_size", [1024, 2048])
 @pytest.mark.parametrize("n_size", [1024, 2048])
diff --git a/tests/ttnn/unit_tests/operations/test_matmul.py b/tests/ttnn/unit_tests/operations/test_matmul.py
index 0fe808e2cc3c..80371ffe36a3 100644
--- a/tests/ttnn/unit_tests/operations/test_matmul.py
+++ b/tests/ttnn/unit_tests/operations/test_matmul.py
@@ -467,37 +467,6 @@ def test_tall_matmul_with_argument_for_using_1D_systolic_array_set_to_true(devic
     assert_with_pcc(torch_output_tensor, output_tensor, pcc=0.997)
 
 
-@skip_for_wormhole_b0()
-@pytest.mark.parametrize("batch_size", [1, 8])
-@pytest.mark.parametrize("m_size", [31, 63])
-@pytest.mark.parametrize("k_size", [1024, 2048])
-@pytest.mark.parametrize("n_size", [1023, 2047])
-def test_matmul_by_passing_in_1D_systolic_array_program_config(device, batch_size, m_size, k_size, n_size):
-    torch.manual_seed(0)
-
-    torch_input_tensor_a = torch.randn((batch_size, m_size, k_size), dtype=torch.bfloat16)
-    torch_input_tensor_b = torch.randn((k_size, n_size), dtype=torch.bfloat16)
-    torch_output_tensor = torch_input_tensor_a @ torch_input_tensor_b
-
-    input_tensor_a = ttnn.from_torch(torch_input_tensor_a, layout=ttnn.TILE_LAYOUT, device=device)
-    input_tensor_b = ttnn.from_torch(torch_input_tensor_b, layout=ttnn.TILE_LAYOUT, device=device)
-
-    program_config = ttnn.create_matmul_1d_systolic_array_program_config(
-        input_shape_a=input_tensor_a.shape.with_tile_padding(),
-        input_shape_b=input_tensor_b.shape.with_tile_padding(),
-        core_grid=input_tensor_a.device().core_grid,
-    )
-
-    output_tensor = ttnn.matmul(
-        input_tensor_a,
-        input_tensor_b,
-        program_config=program_config,
-    )
-
-    output_tensor = ttnn.to_torch(output_tensor)
-    assert_with_pcc(torch_output_tensor, output_tensor, pcc=0.997)
-
-
 @skip_for_wormhole_b0()
 @pytest.mark.parametrize("batch_size", [1])
 @pytest.mark.parametrize("m_size", [128])
diff --git a/tests/ttnn/unit_tests/test_multi_device.py b/tests/ttnn/unit_tests/test_multi_device.py
index c7d814a6c825..5ccaa208f2dc 100644
--- a/tests/ttnn/unit_tests/test_multi_device.py
+++ b/tests/ttnn/unit_tests/test_multi_device.py
@@ -449,7 +449,7 @@ def test_sharded_matmul(t3k_device_mesh):
 
 def test_4b_tensor(device_mesh):
     tensor = ttnn.from_torch(
-        torch.randn(1, 1, 32, 32),
+        torch.randn(1, 1, 64, 64),
         dtype=ttnn.bfloat4_b,
         layout=ttnn.TILE_LAYOUT,
         device=device_mesh,
@@ -457,7 +457,7 @@ def test_4b_tensor(device_mesh):
     tensor = ttnn.to_device(tensor, device_mesh)
     x = ttnn.from_torch(
-        torch.randn(1, 1, 32, 32),
+        torch.randn(1, 1, 64, 64),
         dtype=ttnn.bfloat16,
         layout=ttnn.TILE_LAYOUT,
         device=device_mesh,
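
Note on the replacement pattern (added commentary, not part of the applied
patch): the deleted tests were the remaining callers of the removed
ttnn.create_matmul_1d_systolic_array_program_config helper, and the retained
tests named *_with_argument_for_using_1D_systolic_array_set_to_true indicate
the 1D systolic path is now requested through an argument to ttnn.matmul /
ttnn.linear rather than a hand-built program config. Below is a minimal
sketch of the migrated call; the use_1d_systolic_array keyword is an
assumption inferred from those test names, not something this diff confirms.

    import torch
    import ttnn

    def run_matmul_on_1d_systolic_array(device, m_size=32, k_size=1024, n_size=1024):
        torch.manual_seed(0)
        torch_a = torch.randn((1, m_size, k_size), dtype=torch.bfloat16)
        torch_b = torch.randn((k_size, n_size), dtype=torch.bfloat16)

        # Same device/layout setup as the deleted tests.
        input_a = ttnn.from_torch(torch_a, layout=ttnn.TILE_LAYOUT, device=device)
        input_b = ttnn.from_torch(torch_b, layout=ttnn.TILE_LAYOUT, device=device)

        # Instead of passing program_config=..., ask for the 1D systolic path
        # directly (keyword assumed from the retained test names in this patch).
        output = ttnn.matmul(
            input_a,
            input_b,
            use_1d_systolic_array=True,
            core_grid=device.core_grid,
        )
        return ttnn.to_torch(output)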