
Fix unregistered dialects error message (ttmlir-translate) (#1341)
svuckovicTT authored Nov 21, 2024
1 parent c755792 commit a47dcc7
Showing 6 changed files with 69 additions and 16 deletions.
6 changes: 3 additions & 3 deletions docs/src/ttmlir-translate.md
@@ -5,15 +5,15 @@ The `ttmlir-translate` translation utility. Unlike `ttmlir-opt` tool which is us

```bash
# First, let's run `ttmlir-opt` to convert to proper dialect
./build/bin/ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn --convert-ttnn-to-emitc test/ttmlir/Dialect/TTNN/simple_multiply.mlir -o c.mlir
./build/bin/ttmlir-opt --ttir-to-emitc-pipeline test/ttmlir/Dialect/TTNN/simple_multiply.mlir -o c.mlir

# Now run `ttmlir-translate` to produce C++ code
./build/bin/ttmlir-translate -mlir-to-cpp c.mlir -allow-unregistered-dialect
./build/bin/ttmlir-translate --mlir-to-cpp c.mlir
```

Bonus: These two commands can be piped, to avoid writing a `mlir` file to disk, like so:
```bash
./build/bin/ttmlir-opt --ttir-load-system-desc --ttir-layout --convert-ttir-to-ttnn --convert-ttnn-to-emitc test/ttmlir/Dialect/TTNN/simple_multiply.mlir | ./build/bin/ttmlir-translate -mlir-to-cpp -allow-unregistered-dialect
./build/bin/ttmlir-opt --ttir-to-emitc-pipeline test/ttmlir/Dialect/TTNN/simple_multiply.mlir | ./build/bin/ttmlir-translate -mlir-to-cpp
```
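
For reference, `--mlir-to-cpp` emits a plain C++ function built on the `ttnn` API. Its overall shape is sketched below, abridged from the `forward` function in `tools/ttnn-standalone/ttnn-standalone.cpp` later in this commit; the body and the exact op calls depend on the input IR, so treat this as an illustrative outline rather than verbatim compiler output:

```cpp
#include "ttnn-precompiled.hpp"

// Signature and device lookup mirror the generated forward() shown further
// down in this commit; the body here is only a placeholder.
ttnn::Tensor forward(ttnn::Tensor v1, ttnn::Tensor v2) {
  ttnn::Device *v3 = ttnn::DeviceGetter::getInstance();
  // ... layout/device handling and the lowered ttnn op calls are emitted here ...
  return v1; // placeholder; the real function returns the computed tensor
}
```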

## Generate flatbuffer file from MLIR
33 changes: 33 additions & 0 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -618,6 +618,35 @@ class DeallocateOpConversionPattern
}
};

// Module Op conversion pattern
//
// This conversion pattern removes attributes from the ModuleOp. Previously,
// ttmlir-translate would complain when translating to C++ if there were any
// attributes from "unregistered" dialects.
//
class ModuleOpConversionPattern
: public TTNNToEmitCBaseOpConversionPattern<mlir::ModuleOp> {

public:
ModuleOpConversionPattern(const TypeConverter &typeConverter,
MLIRContext *context, PatternBenefit benefit = 1)
: TTNNToEmitCBaseOpConversionPattern<mlir::ModuleOp>(typeConverter,
context, benefit) {}

LogicalResult
matchAndRewrite(mlir::ModuleOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {

rewriter.modifyOpInPlace(srcOp, [&]() {
for (const NamedAttribute &attr : srcOp->getAttrs()) {
srcOp->removeAttr(attr.getName());
}
});

return success();
}
};

} // namespace

namespace mlir::tt {
@@ -720,6 +749,10 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
//
patterns.add<DefaultOpConversionPattern<ttnn::AllGatherOp>>(typeConverter,
ctx);

// Module op
//
patterns.add<ModuleOpConversionPattern>(typeConverter, ctx);
}

} // namespace mlir::tt
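
To make the effect of `ModuleOpConversionPattern` concrete, here is a small self-contained sketch using only core MLIR APIs. It is not repo code, and the attribute name `demo.system_desc` is a made-up stand-in for whatever the TTIR/TTNN pipeline actually attaches to the module:

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"

int main() {
  mlir::MLIRContext ctx;
  mlir::OpBuilder builder(&ctx);
  mlir::ModuleOp module = mlir::ModuleOp::create(builder.getUnknownLoc());

  // Stand-in for the pipeline-attached attribute that used to make
  // ttmlir-translate complain about unregistered dialects.
  module->setAttr("demo.system_desc", builder.getStringAttr("placeholder"));
  module.print(llvm::outs()); // roughly: module attributes {demo.system_desc = "placeholder"} { }

  // Same idiom as the pattern's matchAndRewrite: snapshot the attribute
  // list, then remove each attribute by name.
  for (mlir::NamedAttribute attr : llvm::to_vector(module->getAttrs()))
    module->removeAttr(attr.getName());
  module.print(llvm::outs()); // prints a bare: module { }

  module->erase();
  return 0;
}
```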
22 changes: 14 additions & 8 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitCPass.cpp
@@ -4,6 +4,11 @@

#include "ttmlir/Conversion/TTNNToEmitC/TTNNToEmitC.h"

#include "ttmlir/Dialect/TTNN/IR/TTNN.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOps.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h"

#include "mlir/Dialect/EmitC/IR/EmitC.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
@@ -12,11 +17,6 @@
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"

#include "ttmlir/Dialect/TTNN/IR/TTNN.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOps.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h"
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h"

using namespace mlir;
using namespace mlir::tt;

@@ -48,14 +48,20 @@ struct ConvertTTNNToEmitCPass
void runOnOperation() override {
mlir::ConversionTarget target(getContext());

// EmitC is legal, TTNN is illegal
//
target.addLegalDialect<emitc::EmitCDialect>();
target.addIllegalDialect<ttnn::TTNNDialect>();
target.addLegalOp<mlir::ModuleOp>();

// mlir::ModuleOp is legal only if no attributes are present on it
//
target.addDynamicallyLegalOp<mlir::ModuleOp>(
[&](mlir::ModuleOp op) { return op->getAttrs().empty(); });

// Add header imports to front of module
//
{
auto module = getOperation();
mlir::ModuleOp module = getOperation();
OpBuilder builder(module);

if (module.getBodyRegion().empty()) {
@@ -107,7 +113,7 @@ struct ConvertTTNNToEmitCPass
return;
}
}
};
}
};

} // namespace
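
For readers less familiar with MLIR's dialect-conversion framework: the dynamic-legality callback above only matters once the pass hands the configured target and patterns to a conversion driver, and that call sits outside the hunks shown here. The sketch below is the standard idiom under that assumption, not an excerpt from this file; the driver keeps applying patterns until every op is legal, which is why `mlir::ModuleOp` only becomes legal after `ModuleOpConversionPattern` has stripped its attributes:

```cpp
#include <utility>

#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/DialectConversion.h"

// Generic full-conversion driver call (assumed wiring, not repo code).
static mlir::LogicalResult
runFullConversion(mlir::ModuleOp module, mlir::ConversionTarget &target,
                  mlir::RewritePatternSet &&patterns) {
  // Fails if any op that the target still considers illegal survives after
  // all patterns have been exhausted.
  return mlir::applyFullConversion(module, target, std::move(patterns));
}
```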
16 changes: 16 additions & 0 deletions test/ttmlir/Silicon/TTNN/emitc/two_fns.mlir
@@ -0,0 +1,16 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>

func.func @add(%arg0: tensor<32x32xbf16>, %arg1: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
%0 = tensor.empty() : tensor<32x32xbf16>
%1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
return %1 : tensor<32x32xbf16>
}

func.func @subtract(%arg0: tensor<32x32xbf16>, %arg1: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
%0 = tensor.empty() : tensor<32x32xbf16>
%1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
return %1 : tensor<32x32xbf16>
}
2 changes: 1 addition & 1 deletion tools/ttnn-standalone/README.md
@@ -14,7 +14,7 @@ Third party ML models (PyTorch, Jax, ONNX, ...) can be compiled to a set of TTNN

```bash
# Compile a model to C++ code
./build/bin/ttmlir-opt --ttir-load-system-desc --ttir-implicit-device --ttir-layout --convert-ttir-to-ttnn --ttnn-decompose-layouts --ttnn-deallocate --convert-ttnn-to-emitc test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir | ./build/bin/ttmlir-translate --mlir-to-cpp -allow-unregistered-dialect
./build/bin/ttmlir-opt --ttir-to-emitc-pipeline test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir | ./build/bin/ttmlir-translate --mlir-to-cpp

# Copy paste the generated function into `ttnn-standalone.cpp`.

6 changes: 2 additions & 4 deletions tools/ttnn-standalone/ttnn-standalone.cpp
@@ -5,11 +5,9 @@
#include "ttnn-precompiled.hpp"

// To generate forward function, run:
// ./build/bin/ttmlir-opt --ttir-load-system-desc --ttir-implicit-device
// --ttir-layout --convert-ttir-to-ttnn --ttnn-decompose-layouts
// --ttnn-deallocate --convert-ttnn-to-emitc
// ./build/bin/ttmlir-opt --ttir-to-emitc-pipeline
// test/ttmlir/Silicon/TTNN/emitc/simple_add.mlir | ./build/bin/ttmlir-translate
// --mlir-to-cpp -allow-unregistered-dialect
// --mlir-to-cpp

ttnn::Tensor forward(ttnn::Tensor v1, ttnn::Tensor v2) {
ttnn::Device *v3 = ttnn::DeviceGetter::getInstance();
