diff --git a/test/lit.cfg.py b/test/lit.cfg.py
index d65acc7b2..e06958a38 100644
--- a/test/lit.cfg.py
+++ b/test/lit.cfg.py
@@ -98,3 +98,10 @@ def set_system_desc_features(system_desc):
     ],
     append_path=True,
 )
+
+if "TT_METAL_HOME" in os.environ:
+    print(f"{os.environ['TT_METAL_HOME']}")
+    llvm_config.with_environment("TT_METAL_HOME", os.environ["TT_METAL_HOME"])
+else:
+
+    llvm_config.with_environment("TT_METAL_HOME", r"/__w/tt-mlir/tt-mlir/third_party/tt-metal/src/tt-metal")
\ No newline at end of file
diff --git a/test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir b/test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir
index 96798905c..bbe769b7a 100644
--- a/test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir
+++ b/test/ttmlir/Silicon/TTNN/optimizer/mnist_sharding.mlir
@@ -1,11 +1,11 @@
-// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=true memory-layout-analysis-enabled=true" %s > %t.mlir
-// RUN: FileCheck %s --input-file=%t.mlir
-// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=true memory-layout-analysis-enabled=true" -o output_file.mlir %s
+// RUN: FileCheck %s --input-file=output_file.mlir
+// RUN: ttmlir-translate --ttnn-to-flatbuffer output_file.mlir > %t.ttnn
 #loc = loc("MNISTLinear":4294967295:0)
 module @"tt-forge-graph" attributes {} {
   func.func @main(%arg0: tensor<1x784xf32> loc("MNISTLinear":4294967295:0), %arg1: tensor<1x10xf32> loc("MNISTLinear":4294967295:0), %arg2: tensor<256x10xf32> loc("MNISTLinear":4294967295:0), %arg3: tensor<1x256xf32> loc("MNISTLinear":4294967295:0), %arg4: tensor<784x256xf32> loc("MNISTLinear":4294967295:0)) -> tensor<1x10xf32> {
-    // CHECK-DAG: #[[LAYOUT_10:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x8>, memref<1x1x!tt.tile<32x32, f32>, #l1_>, >
-    // CHECK-DAG: #[[LAYOUT_11:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<1x1x!tt.tile<32x32, f32>, #l1_>, >
+    // CHECK-DAG: #[[LAYOUT_10:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x8, (d0, d1) -> (0, d1 floordiv 8, d1 mod 8)>, memref<1x1x!tt.tile<32x32, f32>, #l1_>, >
+    // CHECK-DAG: #[[LAYOUT_11:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1, (d0, d1) -> (0, d1 floordiv 8, d1 mod 8)>, memref<1x1x!tt.tile<32x32, f32>, #l1_>, >
     %0 = tensor.empty() : tensor<1x256xf32> loc(#loc8)
     // CHECK: %{{.*}} = "ttnn.matmul"{{.*}} -> tensor<1x256xf32, #[[LAYOUT_10]]>
     %1 = "ttir.matmul"(%arg0, %arg4, %0) : (tensor<1x784xf32>, tensor<784x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc8)
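For context, the lit.cfg.py hunk forwards TT_METAL_HOME from the caller's environment into the lit test environment, falling back to the CI checkout path when the variable is unset, so that RUN lines and the tools they invoke see a consistent value. Below is a minimal sketch of equivalent logic, assuming the `llvm_config` helper object that lit.cfg.py already has in scope (its `with_environment(name, value)` method is what the patch calls); the `DEFAULT_TT_METAL_HOME` name is illustrative and not part of the patch.

```python
import os

# Fallback for CI runners where TT_METAL_HOME is not exported
# (path taken verbatim from the patch).
DEFAULT_TT_METAL_HOME = "/__w/tt-mlir/tt-mlir/third_party/tt-metal/src/tt-metal"

# Resolve the value once, then forward it to the lit test environment.
tt_metal_home = os.environ.get("TT_METAL_HOME", DEFAULT_TT_METAL_HOME)
llvm_config.with_environment("TT_METAL_HOME", tt_metal_home)
```

Compared with the if/else in the diff, `os.environ.get` with a default keeps a single call site; whether to also keep the diagnostic `print` is a separate choice.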