#7511: Update matmul-related tests to not use the removed 1D program config
bbradelTT committed May 10, 2024
1 parent 35088e9 · commit b2f4ce9
Showing 3 changed files with 2 additions and 67 deletions.
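
Both deleted tests below built an explicit program config with ttnn.create_matmul_1d_systolic_array_program_config, the helper whose removal this commit tracks. The tests that survive in the same files (e.g. test_wide_linear_with_argument_for_using_1D_systolic_array_set_to_true) instead ask the op to derive the config itself. A minimal sketch of that surviving pattern, with the use_1d_systolic_array keyword inferred from those test names rather than confirmed by this diff:

import torch
import ttnn

def matmul_without_explicit_program_config(device):
    # Hypothetical illustration: no program_config is constructed here.
    a = torch.randn((1, 32, 1024), dtype=torch.bfloat16)
    b = torch.randn((1024, 1024), dtype=torch.bfloat16)
    tt_a = ttnn.from_torch(a, layout=ttnn.TILE_LAYOUT, device=device)
    tt_b = ttnn.from_torch(b, layout=ttnn.TILE_LAYOUT, device=device)
    # `use_1d_systolic_array` is an assumption based on the surviving
    # test names; the op picks the 1D systolic-array config internally.
    output = ttnn.matmul(tt_a, tt_b, use_1d_systolic_array=True)
    return ttnn.to_torch(output)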
tests/ttnn/unit_tests/operations/test_linear.py (0 additions, 34 deletions)
@@ -171,40 +171,6 @@ def test_wide_linear_with_argument_for_using_1D_systolic_array_set_to_true(
     assert_with_pcc(torch_output_tensor, output_tensor, 0.997)
 
 
-@pytest.mark.parametrize("batch_size", [1, 8])
-@pytest.mark.parametrize("m_size", [32, 64])
-@pytest.mark.parametrize("k_size", [1024, 2048])
-@pytest.mark.parametrize("n_size", [1024, 2048])
-@pytest.mark.parametrize("activation", [None, "relu"])
-def test_linear_by_passing_in_1D_systolic_array_program_config(device, batch_size, m_size, k_size, n_size, activation):
-    torch.manual_seed(0)
-
-    torch_input_tensor_a = torch.randn((batch_size, m_size, k_size), dtype=torch.bfloat16)
-    torch_input_tensor_b = torch.randn((k_size, n_size), dtype=torch.bfloat16)
-    torch_output_tensor = torch_input_tensor_a @ torch_input_tensor_b
-    if activation == "relu":
-        torch_output_tensor = torch.relu(torch_output_tensor)
-
-    input_tensor_a = ttnn.from_torch(torch_input_tensor_a, layout=ttnn.TILE_LAYOUT, device=device)
-    input_tensor_b = ttnn.from_torch(torch_input_tensor_b, layout=ttnn.TILE_LAYOUT, device=device)
-
-    program_config = ttnn.create_matmul_1d_systolic_array_program_config(
-        input_shape_a=input_tensor_a.shape,
-        input_shape_b=input_tensor_b.shape,
-        core_grid=device.core_grid,
-        activation=activation,
-    )
-
-    output_tensor = ttnn.linear(
-        input_tensor_a,
-        input_tensor_b,
-        program_config=program_config,
-    )
-
-    output_tensor = ttnn.to_torch(output_tensor)
-    assert_with_pcc(torch_output_tensor, output_tensor, 0.997)
-
-
 @pytest.mark.parametrize("m_size", [32, 512])
 @pytest.mark.parametrize("k_size", [1024, 2048])
 @pytest.mark.parametrize("n_size", [1024, 2048])
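
The removed linear test also folded the activation into the program config (activation=activation above). In the op-level style used by the surviving tests, the activation would presumably be passed to ttnn.linear directly; a hedged sketch, where both keywords are assumptions based on neighboring test names rather than code visible in this commit:

import torch
import ttnn

def linear_with_fused_relu(device):
    a = torch.randn((1, 32, 1024), dtype=torch.bfloat16)
    b = torch.randn((1024, 1024), dtype=torch.bfloat16)
    tt_a = ttnn.from_torch(a, layout=ttnn.TILE_LAYOUT, device=device)
    tt_b = ttnn.from_torch(b, layout=ttnn.TILE_LAYOUT, device=device)
    # `activation` and `use_1d_systolic_array` are assumed keywords;
    # the fused relu replaces the config-level activation shown above.
    output = ttnn.linear(tt_a, tt_b, activation="relu", use_1d_systolic_array=True)
    return ttnn.to_torch(output)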
tests/ttnn/unit_tests/operations/test_matmul.py (0 additions, 31 deletions)
@@ -467,37 +467,6 @@ def test_tall_matmul_with_argument_for_using_1D_systolic_array_set_to_true(devic
     assert_with_pcc(torch_output_tensor, output_tensor, pcc=0.997)
 
 
-@skip_for_wormhole_b0()
-@pytest.mark.parametrize("batch_size", [1, 8])
-@pytest.mark.parametrize("m_size", [31, 63])
-@pytest.mark.parametrize("k_size", [1024, 2048])
-@pytest.mark.parametrize("n_size", [1023, 2047])
-def test_matmul_by_passing_in_1D_systolic_array_program_config(device, batch_size, m_size, k_size, n_size):
-    torch.manual_seed(0)
-
-    torch_input_tensor_a = torch.randn((batch_size, m_size, k_size), dtype=torch.bfloat16)
-    torch_input_tensor_b = torch.randn((k_size, n_size), dtype=torch.bfloat16)
-    torch_output_tensor = torch_input_tensor_a @ torch_input_tensor_b
-
-    input_tensor_a = ttnn.from_torch(torch_input_tensor_a, layout=ttnn.TILE_LAYOUT, device=device)
-    input_tensor_b = ttnn.from_torch(torch_input_tensor_b, layout=ttnn.TILE_LAYOUT, device=device)
-
-    program_config = ttnn.create_matmul_1d_systolic_array_program_config(
-        input_shape_a=input_tensor_a.shape.with_tile_padding(),
-        input_shape_b=input_tensor_b.shape.with_tile_padding(),
-        core_grid=input_tensor_a.device().core_grid,
-    )
-
-    output_tensor = ttnn.matmul(
-        input_tensor_a,
-        input_tensor_b,
-        program_config=program_config,
-    )
-
-    output_tensor = ttnn.to_torch(output_tensor)
-    assert_with_pcc(torch_output_tensor, output_tensor, pcc=0.997)
-
-
 @skip_for_wormhole_b0()
 @pytest.mark.parametrize("batch_size", [1])
 @pytest.mark.parametrize("m_size", [128])
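
Unlike the linear test, this removed matmul test exercised non-tile-aligned sizes (m_size of 31/63, n_size of 1023/2047), which is why it passed .shape.with_tile_padding() to the helper: TILE_LAYOUT stores data in 32x32 tiles, so logical dimensions are padded up to the next tile multiple. A small self-contained illustration of that rounding (round_up_to_tile is a local helper for this sketch, not a ttnn API):

TILE = 32

def round_up_to_tile(dim: int) -> int:
    # Round a logical dimension up to the next multiple of the 32-wide tile.
    return ((dim + TILE - 1) // TILE) * TILE

# The padded dimensions the removed test would have handed to the helper:
assert round_up_to_tile(31) == 32
assert round_up_to_tile(63) == 64
assert round_up_to_tile(1023) == 1024
assert round_up_to_tile(2047) == 2048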
tests/ttnn/unit_tests/test_multi_device.py (2 additions, 2 deletions)
@@ -449,15 +449,15 @@ def test_sharded_matmul(t3k_device_mesh):

 def test_4b_tensor(device_mesh):
     tensor = ttnn.from_torch(
-        torch.randn(1, 1, 32, 32),
+        torch.randn(1, 1, 64, 64),
         dtype=ttnn.bfloat4_b,
         layout=ttnn.TILE_LAYOUT,
         device=device_mesh,
         mesh_mapper=ReplicateTensorToMesh(device_mesh),
     )
     tensor = ttnn.to_device(tensor, device_mesh)
     x = ttnn.from_torch(
-        torch.randn(1, 1, 32, 32),
+        torch.randn(1, 1, 64, 64),
         dtype=ttnn.bfloat16,
         layout=ttnn.TILE_LAYOUT,
         device=device_mesh,
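
The only behavioral change in this file bumps the test tensors from 32x32 (a single tile) to 64x64; the diff does not state the motivation, so none is asserted here. For context, a sketch of how such a replicated bfloat4_b tensor is typically read back in this file's multi-device style, assuming the ConcatMeshToTensor composer used elsewhere in the same test suite:

import torch
import ttnn
from ttnn import ConcatMeshToTensor, ReplicateTensorToMesh

def roundtrip_replicated_bfloat4(device_mesh):
    full = torch.randn(1, 1, 64, 64)
    tt = ttnn.from_torch(
        full,
        dtype=ttnn.bfloat4_b,
        layout=ttnn.TILE_LAYOUT,
        device=device_mesh,
        mesh_mapper=ReplicateTensorToMesh(device_mesh),
    )
    # Each device holds a full copy; concatenating along dim 0 stacks
    # one (quantized) replica per device for comparison against `full`.
    return ttnn.to_torch(tt, mesh_composer=ConcatMeshToTensor(device_mesh, dim=0))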
