From 11f0ea7c3dd5dd8f2389596823c847dbea7981b9 Mon Sep 17 00:00:00 2001
From: Filip Bajraktari
Date: Mon, 4 Nov 2024 14:53:28 +0000
Subject: [PATCH] Added tests

---
 .../TTNN/sharding_matmul_override_0.mlir      |  2 +-
 .../TTNN/sharding_matmul_override_32.mlir     |  2 +-
 test/ttmlir/Silicon/TTNN/large_tensors.mlir   | 19 +++++++++++++++++++
 .../ttmlir/Silicon/TTNN/simple_fork_join.mlir | 18 ++++++++++++++++++
 4 files changed, 39 insertions(+), 2 deletions(-)
 create mode 100644 test/ttmlir/Silicon/TTNN/large_tensors.mlir
 create mode 100644 test/ttmlir/Silicon/TTNN/simple_fork_join.mlir

diff --git a/test/ttmlir/Dialect/TTNN/sharding_matmul_override_0.mlir b/test/ttmlir/Dialect/TTNN/sharding_matmul_override_0.mlir
index 9516f96f5..2e07f7f5c 100644
--- a/test/ttmlir/Dialect/TTNN/sharding_matmul_override_0.mlir
+++ b/test/ttmlir/Dialect/TTNN/sharding_matmul_override_0.mlir
@@ -2,7 +2,7 @@
 #any_device_tile = #tt.operand_constraint<any_device_tile>
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>, %arg2: tensor<96x64xbf16>) -> tensor<64x64xbf16> {
-    // CHECK: #[[LAYOUT_7:layout7]] = #tt.layout<{{.*}}, memref<{{.*}}>, #dram>, {{.*}}>
+    // CHECK: #[[LAYOUT_7:layout7]] = #tt.layout<{{.*}}, memref<{{.*}}, #dram>, {{.*}}>
     %0 = tensor.empty() : tensor<64x96xbf16>
     // CHECK: {{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]>
     %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
diff --git a/test/ttmlir/Dialect/TTNN/sharding_matmul_override_32.mlir b/test/ttmlir/Dialect/TTNN/sharding_matmul_override_32.mlir
index 3e26d1490..8e984348f 100644
--- a/test/ttmlir/Dialect/TTNN/sharding_matmul_override_32.mlir
+++ b/test/ttmlir/Dialect/TTNN/sharding_matmul_override_32.mlir
@@ -3,7 +3,7 @@
 module attributes {} {
   func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<128x96xbf16>, %arg2: tensor<96x64xbf16>) -> tensor<64x64xbf16> {
     // CHECK: #[[L1_:.*]] = #tt.memory_space<l1>
-    // CHECK: #[[LAYOUT_7:layout7]] = #tt.layout<{{.*}}, memref<{{.*}}>, #l1_>, {{.*}}>
+    // CHECK: #[[LAYOUT_7:layout7]] = #tt.layout<{{.*}}, memref<{{.*}}, #l1_>, {{.*}}>
     %0 = tensor.empty() : tensor<64x96xbf16>
     // CHECK: {{.*}} = "ttnn.matmul"{{.*}} -> tensor<64x96xbf16, #[[LAYOUT_7]]>
     %1 = "ttir.matmul"(%arg0, %arg1, %0) <{operand_constraints = [#any_device_tile, #any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x96xbf16>, tensor<64x96xbf16>) -> tensor<64x96xbf16>
diff --git a/test/ttmlir/Silicon/TTNN/large_tensors.mlir b/test/ttmlir/Silicon/TTNN/large_tensors.mlir
new file mode 100644
index 000000000..b258435db
--- /dev/null
+++ b/test/ttmlir/Silicon/TTNN/large_tensors.mlir
@@ -0,0 +1,19 @@
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=true memory-layout-analysis-enabled=true memory-layout-analysis-policy=L1Interleaved" %s > %t.mlir
+// RUN: FileCheck %s --input-file=%t.mlir
+// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+#any_device = #tt.operand_constraint<any_device>
+module attributes {} {
+  func.func @forward(%arg0: tensor<8192x8192xbf16>, %arg1: tensor<8192x8192xbf16>, %arg2: tensor<8192x8192xbf16>) -> tensor<8192x8192xbf16> {
+    // CHECK: #[[LAYOUT_2:layout2]] = #tt.layout<{{.*}}, memref<{{.*}}, #dram>, {{.*}}>
+    %0 = tensor.empty() : tensor<8192x8192xbf16>
+    // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<8192x8192xbf16, #[[LAYOUT_2]]>
+    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<8192x8192xbf16>, tensor<8192x8192xbf16>, tensor<8192x8192xbf16>) -> tensor<8192x8192xbf16>
+    %2 = tensor.empty() : tensor<8192x8192xbf16>
+    // CHECK: %{{.*}} = "ttnn.add"{{.*}} -> tensor<8192x8192xbf16, #[[LAYOUT_2]]>
+    %3 = "ttir.add"(%1, %arg2, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<8192x8192xbf16>, tensor<8192x8192xbf16>, tensor<8192x8192xbf16>) -> tensor<8192x8192xbf16>
+    %4 = tensor.empty() : tensor<8192x8192xbf16>
+    // CHECK: %{{.*}} = "ttnn.relu"{{.*}} -> tensor<8192x8192xbf16, #[[LAYOUT_2]]>
+    %7 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<8192x8192xbf16>, tensor<8192x8192xbf16>) -> tensor<8192x8192xbf16>
+    return %7 : tensor<8192x8192xbf16>
+  }
+}
diff --git a/test/ttmlir/Silicon/TTNN/simple_fork_join.mlir b/test/ttmlir/Silicon/TTNN/simple_fork_join.mlir
new file mode 100644
index 000000000..981c26b49
--- /dev/null
+++ b/test/ttmlir/Silicon/TTNN/simple_fork_join.mlir
@@ -0,0 +1,18 @@
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=true memory-layout-analysis-enabled=true memory-layout-analysis-policy=L1Interleaved" %s > %t.mlir
+// RUN: FileCheck %s --input-file=%t.mlir
+// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+// UNSUPPORTED: true
+#any_device = #tt.operand_constraint<any_device>
+module attributes {} {
+  func.func @forward(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>, %arg2: tensor<64x128xbf16>, %arg3: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
+    %0 = tensor.empty() : tensor<64x128xbf16>
+    %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %2 = tensor.empty() : tensor<64x128xbf16>
+    %3 = "ttir.add"(%arg2, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %4 = tensor.empty() : tensor<64x128xbf16>
+    %5 = "ttir.add"(%1, %3, %4) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    %6 = tensor.empty() : tensor<64x128xbf16>
+    %7 = "ttir.relu"(%5, %6) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+    return %7 : tensor<64x128xbf16>
+  }
+}