From df2485208104c66bcefec74e80efae20bdf665aa Mon Sep 17 00:00:00 2001 From: Stefan Djordjevic Date: Thu, 26 Dec 2024 11:22:47 +0000 Subject: [PATCH 1/2] Reverting commit ad87a44878ffd95d54b9ef9d4d19faa4a5b8fce8 that introduces break in tt-forge-fe --- lib/Dialect/TTNN/Transforms/Passes.cpp | 78 ++++++++----------- .../ttnn/include/tt/runtime/ttnn/types.cpp | 35 ++++----- .../eltwise_binary_op_chain.mlir | 19 ++--- 3 files changed, 58 insertions(+), 74 deletions(-) diff --git a/lib/Dialect/TTNN/Transforms/Passes.cpp b/lib/Dialect/TTNN/Transforms/Passes.cpp index 73f2eadbb..9baa492cd 100644 --- a/lib/Dialect/TTNN/Transforms/Passes.cpp +++ b/lib/Dialect/TTNN/Transforms/Passes.cpp @@ -30,11 +30,6 @@ namespace mlir::tt::ttnn { #define GEN_PASS_DEF_TTNNMODIFYSIGNATURESFORDYLIB #include "ttmlir/Dialect/TTNN/Transforms/Passes.h.inc" -// TTNN supports device tilize for bf16 and fp32 -static bool canTilizeDataTypeOnDevice(DataType dataType) { - return dataType == DataType::BFloat16 or dataType == DataType::Float32; -} - class TTNNDeallocate : public impl::TTNNDeallocateBase { public: @@ -432,9 +427,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize, and the data type can be tilized on device, tilize - * on device */ - if (info.shouldTilize() and canTilizeDataTypeOnDevice(output.dataType)) { + /* If we should tilize, and the data type is bfloat16, we can tilize on + * device */ + if (info.shouldTilize() and output.dataType == DataType::BFloat16) { currentInput = this->createToDeviceOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -445,10 +440,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize, and the data type cannot be tilized on device, - * tilize on host */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(output.dataType)) { + /* If we should tilize, and the data type is not bfloat16, we tilize on host + */ + if (info.shouldTilize() and output.dataType != DataType::BFloat16) { currentInput = this->createToLayoutOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -519,9 +513,9 @@ class TTNNDecomposeLayouts return; } - /* If we need to tilize and the input datatype is tilizeable on device, + /* If we need to tilize and the input datatype is bfloat16 we can tilize on device and then typecast afterwards */ - if (info.shouldTilize() and canTilizeDataTypeOnDevice(input.dataType)) { + if (info.shouldTilize() and input.dataType == DataType::BFloat16) { currentInput = this->createToDeviceOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -534,9 +528,9 @@ class TTNNDecomposeLayouts return; } - /* if we need to tilize and the output data type can be tilized on device, + /* if we need to tilize and the output data type is bfloat16 we can typecast on host and tilize on device */ - if (info.shouldTilize() and canTilizeDataTypeOnDevice(output.dataType)) { + if (info.shouldTilize() and output.dataType == DataType::BFloat16) { currentInput = this->createTypecastOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -549,11 +543,10 @@ class TTNNDecomposeLayouts return; } - /* if we need to tilize and the input/output data types cannot be tilized on - * device, do everything on host */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(input.dataType) and - not canTilizeDataTypeOnDevice(output.dataType)) { + /* if we need to tilize and the input/ output data types are not bfloat16 do + * everything on host */ + if (info.shouldTilize() and input.dataType != DataType::BFloat16 and + output.dataType != 
DataType::BFloat16) { currentInput = this->createTypecastOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -646,10 +639,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize and the input data type can be tilized on device, - * tilize on device + /* If we should tilize and the input data type is bfloat16, tilize on device */ - if (info.shouldTilize() and canTilizeDataTypeOnDevice(input.dataType)) { + if (info.shouldTilize() and input.dataType == DataType::BFloat16) { currentInput = this->createToLayoutOpIfNeeded(op, rewriter, currentInput, info); currentInput = this->createToMemoryConfigOpIfNeeded(op, rewriter, @@ -660,10 +652,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize and the input data type cannot be tilized on device, - * tilize on host */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(input.dataType) and + /* If we should tilize and the input data type is not bfloat16, tilize on + * host */ + if (info.shouldTilize() and input.dataType != DataType::BFloat16 and opsToCreate.createFromDeviceOp) { currentInput = this->createFromDeviceOpIfNeeded(op, rewriter, currentInput, info); @@ -673,10 +664,9 @@ class TTNNDecomposeLayouts return; } - /* If we want to tilize a device tensor whose data type cannot be tilized on - * device, we need to tilize on host and move it back */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(input.dataType) and + /* If we want to tilize a device tensor that is not bfloat16, we need to + * tilize on host and move it back */ + if (info.shouldTilize() and input.dataType != DataType::BFloat16 and not opsToCreate.createFromDeviceOp) { // Force-create a FromDeviceOp currentInput = @@ -791,9 +781,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize and the input data type can be tilized on device, - * tilize and typecast on device */ - if (info.shouldTilize() and canTilizeDataTypeOnDevice(input.dataType)) { + /* If we should tilize and the input data type is bfloat16, tilize and + * typecast on device */ + if (info.shouldTilize() and input.dataType == DataType::BFloat16) { currentInput = this->createToLayoutOpIfNeeded(op, rewriter, currentInput, info); currentInput = @@ -806,10 +796,9 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize and the input data type cannot be tilized on device, - and we want to read back from device, do everything on host */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(input.dataType) and + /* If we should tilize and the input data type is not bfloat16 and we want + to read back from device do everything on host */ + if (info.shouldTilize() and input.dataType != DataType::BFloat16 and opsToCreate.createFromDeviceOp) { currentInput = this->createFromDeviceOpIfNeeded(op, rewriter, currentInput, info); @@ -821,11 +810,10 @@ class TTNNDecomposeLayouts return; } - /* If we should tilize and the input data type cannot be tilized on device, - and we don't want to read back from device - tilize on host, move back to - device, and typecast on device */ - if (info.shouldTilize() and - not canTilizeDataTypeOnDevice(input.dataType) and + /* If we should tilize and the input data type is not bfloat 16 and we don't + want to read back from device: tilize on host, move back to device, and + typecast on device */ + if (info.shouldTilize() and input.dataType != DataType::BFloat16 and not opsToCreate.createFromDeviceOp) { // Force-create a FromDeviceOp currentInput = @@ -875,7 +863,7 @@ class TTNNDecomposeLayouts /* * Logic for creating 
ops. Conditions/constraints include: * - When possible, we want to execute operations on device. - * - Tilize on device requires dataformat of BFLOAT16 or FLOAT32. + * - Tilize on device requires dataformat of BFLOAT16. * - Typecast on device requires TILIZED tensor. * - Untilize on device requires even width, and page size > * sizeof(uint32_t). For now, we will always untilize on host. We rarely diff --git a/runtime/lib/ttnn/include/tt/runtime/ttnn/types.cpp b/runtime/lib/ttnn/include/tt/runtime/ttnn/types.cpp index 2f7159a88..87d081599 100644 --- a/runtime/lib/ttnn/include/tt/runtime/ttnn/types.cpp +++ b/runtime/lib/ttnn/include/tt/runtime/ttnn/types.cpp @@ -8,10 +8,6 @@ namespace tt::runtime::ttnn { -static bool canTilizeDataTypeOnDevice(::ttnn::DataType dataType) { - return dataType == ::ttnn::DataType::BFLOAT16 or - dataType == ::ttnn::DataType::FLOAT32; -} // // LayoutConverter APIs // @@ -107,14 +103,14 @@ ::ttnn::Tensor LayoutConverter::handleHostInputLayoutNoTypecast( return out; } - if (shouldTilize and canTilizeDataTypeOnDevice(outputDesc.dataType)) { + if (shouldTilize and outputDesc.dataType == ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = toDeviceIfNeeded(input, targetDevice); out = toLayoutIfNeeded(out); out = toMemoryConfigIfNeeded(out); return out; } - if (shouldTilize and canTilizeDataTypeOnDevice(outputDesc.dataType)) { + if (shouldTilize and outputDesc.dataType != ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = toLayoutIfNeeded(input); out = toDeviceIfNeeded(out, targetDevice); out = toMemoryConfigIfNeeded(out); @@ -151,7 +147,7 @@ ::ttnn::Tensor LayoutConverter::handleHostInputLayoutTypecast( return out; } - if (shouldTilize and canTilizeDataTypeOnDevice(inputDesc.dataType)) { + if (shouldTilize and inputDesc.dataType == ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = toDeviceIfNeeded(input, targetDevice); out = toLayoutIfNeeded(out); out = typecastIfNeeded(out); @@ -159,7 +155,7 @@ ::ttnn::Tensor LayoutConverter::handleHostInputLayoutTypecast( return out; } - if (shouldTilize and canTilizeDataTypeOnDevice(outputDesc.dataType)) { + if (shouldTilize and outputDesc.dataType == ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = typecastIfNeeded(input); out = toDeviceIfNeeded(out, targetDevice); out = toLayoutIfNeeded(input); @@ -167,8 +163,8 @@ ::ttnn::Tensor LayoutConverter::handleHostInputLayoutTypecast( return out; } - if (shouldTilize and not canTilizeDataTypeOnDevice(inputDesc.dataType) and - not canTilizeDataTypeOnDevice(outputDesc.dataType)) { + if (shouldTilize and inputDesc.dataType != ::ttnn::DataType::BFLOAT16 and + outputDesc.dataType != ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = typecastIfNeeded(input); out = toLayoutIfNeeded(out); out = toDeviceIfNeeded(out, targetDevice); @@ -221,26 +217,25 @@ ::ttnn::Tensor LayoutConverter::handleDeviceInputLayoutNoTypecast( return out; } - /* If we should tilize and the input data type can be tilized on device, - * tilize on device + /* If we should tilize and the input data type is bfloat16, tilize on device */ - if (shouldTilize and canTilizeDataTypeOnDevice(inputDesc.dataType)) { + if (shouldTilize and inputDesc.dataType == ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = toLayoutIfNeeded(input); out = toMemoryConfigIfNeeded(out); out = fromDeviceIfNeeded(out); return out; } - /* If we should tilize and the input data type cannot be tilized on device, - * tilize on host */ - if (shouldTilize and not canTilizeDataTypeOnDevice(inputDesc.dataType) and + /* If we should tilize and the input data type 
is not bfloat16, tilize on + * host */ + if (shouldTilize and inputDesc.dataType != ::ttnn::DataType::BFLOAT16 and shouldFromDevice) { ::ttnn::Tensor out = fromDeviceIfNeeded(input); out = toLayoutIfNeeded(out); return out; } - if (shouldTilize and not canTilizeDataTypeOnDevice(inputDesc.dataType) and + if (shouldTilize and inputDesc.dataType != ::ttnn::DataType::BFLOAT16 and not shouldFromDevice) { LOG_WARNING("Currently no constraint checking for on-device tilize."); ::ttnn::Tensor out = toLayoutIfNeeded(input); @@ -292,7 +287,7 @@ LayoutConverter::handleDeviceInputLayoutTypecast(const ::ttnn::Tensor &input) { return out; } - if (shouldTilize and canTilizeDataTypeOnDevice(inputDesc.dataType)) { + if (shouldTilize and inputDesc.dataType == ::ttnn::DataType::BFLOAT16) { ::ttnn::Tensor out = toLayoutIfNeeded(input); out = typecastIfNeeded(out); out = toMemoryConfigIfNeeded(out); @@ -300,7 +295,7 @@ LayoutConverter::handleDeviceInputLayoutTypecast(const ::ttnn::Tensor &input) { return out; } - if (shouldTilize and not canTilizeDataTypeOnDevice(inputDesc.dataType) and + if (shouldTilize and inputDesc.dataType != ::ttnn::DataType::BFLOAT16 and shouldFromDevice) { ::ttnn::Tensor out = fromDeviceIfNeeded(input); out = toLayoutIfNeeded(out); @@ -308,7 +303,7 @@ LayoutConverter::handleDeviceInputLayoutTypecast(const ::ttnn::Tensor &input) { return out; } - if (shouldTilize and not canTilizeDataTypeOnDevice(inputDesc.dataType) and + if (shouldTilize and inputDesc.dataType != ::ttnn::DataType::BFLOAT16 and not shouldFromDevice) { LOG_WARNING("Currently no constraint checking for on-device tilize."); ::ttnn::Tensor out = toLayoutIfNeeded(input); diff --git a/test/ttmlir/Runtime/TTNN/runtime_stitching/eltwise_binary_op_chain.mlir b/test/ttmlir/Runtime/TTNN/runtime_stitching/eltwise_binary_op_chain.mlir index a5a100cfb..35b4d9063 100644 --- a/test/ttmlir/Runtime/TTNN/runtime_stitching/eltwise_binary_op_chain.mlir +++ b/test/ttmlir/Runtime/TTNN/runtime_stitching/eltwise_binary_op_chain.mlir @@ -7,15 +7,16 @@ #dram = #ttnn.buffer_type #ttnn_layout = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<64x128xbf16, #system_memory>> #ttnn_layout1 = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, bf16>, #dram>, > +#ttnn_layout2 = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<64x128xbf16, #dram>, > module attributes {tt.device = #device} { func.func @add(%arg0: tensor<64x128xbf16, #ttnn_layout1>, %arg1: tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout> { %0 = "ttnn.get_device"() <{mesh_shape = #ttnn}> : () -> !tt.device<#device> %1 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> %2 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<2x4>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout1> - %4 = "ttnn.add"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout> + %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, 
<<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout2> + %4 = "ttnn.add"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout2> + %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout> %6 = "ttnn.to_layout"(%5) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout>) -> tensor<64x128xbf16, #ttnn_layout> return %6 : tensor<64x128xbf16, #ttnn_layout> } @@ -26,9 +27,9 @@ module attributes {tt.device = #device} { %0 = "ttnn.get_device"() <{mesh_shape = #ttnn}> : () -> !tt.device<#device> %1 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> %2 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<2x4>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout1> - %4 = "ttnn.multiply"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout> + %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout2> + %4 = "ttnn.multiply"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout2> + %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout> %6 = "ttnn.to_layout"(%5) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout>) -> tensor<64x128xbf16, #ttnn_layout> return %6 : tensor<64x128xbf16, #ttnn_layout> } @@ -39,9 +40,9 @@ module attributes {tt.device = #device} { %0 = "ttnn.get_device"() <{mesh_shape = #ttnn}> : () -> !tt.device<#device> %1 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> %2 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<2x4>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout1> - %4 = "ttnn.subtract"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout1> - %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout1>) -> tensor<64x128xbf16, #ttnn_layout> + %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xbf16, #ttnn_layout2> + %4 = "ttnn.subtract"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<64x128xbf16, 
#ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout1>, tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout2> + %5 = "ttnn.from_device"(%4) : (tensor<64x128xbf16, #ttnn_layout2>) -> tensor<64x128xbf16, #ttnn_layout> %6 = "ttnn.to_layout"(%5) <{layout = #ttnn.layout}> : (tensor<64x128xbf16, #ttnn_layout>) -> tensor<64x128xbf16, #ttnn_layout> return %6 : tensor<64x128xbf16, #ttnn_layout> } From 602fa4e983c271af1178d179d97ab5156f7e0fe7 Mon Sep 17 00:00:00 2001 From: Stefan Djordjevic Date: Thu, 26 Dec 2024 11:25:44 +0000 Subject: [PATCH 2/2] Adding a repro test to verify revert --- test/ttmlir/Silicon/TTNN/matmul/llama_matmul.mlir | 11 +++++++++++ .../Silicon/TTNN/{ => matmul}/simple_matmul.mlir | 0 2 files changed, 11 insertions(+) create mode 100644 test/ttmlir/Silicon/TTNN/matmul/llama_matmul.mlir rename test/ttmlir/Silicon/TTNN/{ => matmul}/simple_matmul.mlir (100%) diff --git a/test/ttmlir/Silicon/TTNN/matmul/llama_matmul.mlir b/test/ttmlir/Silicon/TTNN/matmul/llama_matmul.mlir new file mode 100644 index 000000000..e777cd55a --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/matmul/llama_matmul.mlir @@ -0,0 +1,11 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +module attributes {} { + func.func @forward(%arg0: tensor<1x11x2048xf32>, %arg1: tensor<2048x128256xf32>) -> tensor<1x11x128256xf32> { + %0 = tensor.empty() : tensor<1x11x128256xf32> + // CHECK: %[[C:.*]] = "ttnn.matmul"[[C:.*]] + %1 = "ttir.matmul"(%arg0, %arg1, %0) : (tensor<1x11x2048xf32>, tensor<2048x128256xf32>, tensor<1x11x128256xf32>) -> tensor<1x11x128256xf32> + return %1 : tensor<1x11x128256xf32> + } +} diff --git a/test/ttmlir/Silicon/TTNN/simple_matmul.mlir b/test/ttmlir/Silicon/TTNN/matmul/simple_matmul.mlir similarity index 100% rename from test/ttmlir/Silicon/TTNN/simple_matmul.mlir rename to test/ttmlir/Silicon/TTNN/matmul/simple_matmul.mlir
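
For reference, the placement rule that this revert restores can be summarized outside the patch as a small decision predicate: tilize on device only when the tensor is bfloat16, otherwise fall back to host tilize. The sketch below is a minimal standalone illustration of that rule under simplified, assumed types (`DataType`, `LayoutInfo`, `tilizeOnDevice` are illustrative stand-ins, not the actual `TTNNDecomposeLayouts` or runtime `LayoutConverter` API).

```cpp
// Minimal sketch of the tilize-placement rule reinstated by this revert:
// only bfloat16 tensors qualify for on-device tilize; every other data type
// (including fp32, which the reverted commit had allowed) is tilized on host.
// The enum and struct are illustrative stand-ins, not the compiler's types.
#include <iostream>

enum class DataType { BFloat16, Float32, UInt32 };

struct LayoutInfo {
  bool shouldTilize;  // target layout is TILE and the input is ROW_MAJOR
  DataType dataType;  // data type of the tensor being converted
};

// After this revert, the on-device path is taken only for bfloat16.
static bool tilizeOnDevice(const LayoutInfo &info) {
  return info.shouldTilize && info.dataType == DataType::BFloat16;
}

int main() {
  LayoutInfo bf16{true, DataType::BFloat16};
  LayoutInfo fp32{true, DataType::Float32};
  std::cout << std::boolalpha
            << "bf16 tilized on device: " << tilizeOnDevice(bf16) << "\n"   // true
            << "fp32 tilized on device: " << tilizeOnDevice(fp32) << "\n";  // false -> host tilize
  return 0;
}
```

The same predicate appears inlined at each branch of the restored code (compiler pass and runtime converter) rather than as a shared helper, since the revert also removes the `canTilizeDataTypeOnDevice` utility that the reverted commit had introduced.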