Skip to content

Commit

Permalink
Add log operation (#1123)
Browse files Browse the repository at this point in the history
  • Loading branch information
jserbedzijaTT authored Nov 1, 2024
1 parent c78b606 commit b75d44d
Show file tree
Hide file tree
Showing 9 changed files with 49 additions and 2 deletions.
7 changes: 7 additions & 0 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,13 @@ def TTIR_TypecastOp: TTIR_ElementwiseUnaryOp<"typecast"> {
}];
}

// Natural-logarithm elementwise op in the TTIR dialect. Converted 1:1 to
// TTNN_LogOp by the TTIR->TTNN conversion patterns (see TTIRToTTNN.cpp).
def TTIR_LogOp: TTIR_ElementwiseUnaryOp<"log"> {
let summary = "Eltwise logarithm op.";
let description = [{
Eltwise logarithm operation. Calculates log(x) for all elements x in input tensor.
}];
}

class TTIR_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
TTIR_ElementwiseOp<mnemonic, traits> {
let summary = "Eltwise binary op.";
Expand Down
7 changes: 7 additions & 0 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,13 @@ def TTNN_SigmoidOp : TTNN_ElementwiseUnaryOp<"sigmoid"> {
}];
}

// Natural-logarithm elementwise op in the TTNN dialect. Target of the
// TTIR_LogOp conversion; serialized to flatbuffer as EltwiseOpType::Log and
// executed by the runtime via ::ttnn::log.
def TTNN_LogOp : TTNN_ElementwiseUnaryOp<"log"> {
let summary = "Eltwise logarithm.";
let description = [{
Eltwise logarithm operation.
}];
}

def TTNN_AddOp : TTNN_ElementwiseBinaryOp<"add"> {
let summary = "Eltwise add.";
let description = [{
Expand Down
3 changes: 2 additions & 1 deletion include/ttmlir/Target/TTNN/program.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,8 @@ enum EltwiseOpType: uint32 {
Minimum = 24,
Ceil = 25,
Sin = 26,
Cos = 27
Cos = 27,
Log = 28
}

union EltwiseOpParams {
Expand Down
1 change: 1 addition & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -879,6 +879,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
ElementwiseOpConversionPattern<ttir::SigmoidOp, ttnn::SigmoidOp>,
ElementwiseOpConversionPattern<ttir::ReciprocalOp, ttnn::ReciprocalOp>,
ElementwiseOpConversionPattern<ttir::ExpOp, ttnn::ExpOp>,
ElementwiseOpConversionPattern<ttir::LogOp, ttnn::LogOp>,
ElementwiseOpConversionPattern<ttir::DivOp, ttnn::DivOp>,
ElementwiseOpConversionPattern<ttir::CeilOp, ttnn::CeilOp>,
ElementwiseOpConversionPattern<ttir::SinOp, ttnn::SinOp>,
Expand Down
3 changes: 2 additions & 1 deletion lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -626,7 +626,8 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
DefaultOpConversionPattern<ttnn::ExpOp>,
DefaultOpConversionPattern<ttnn::CeilOp>,
DefaultOpConversionPattern<ttnn::SinOp>,
DefaultOpConversionPattern<ttnn::CosOp>>(typeConverter, ctx);
DefaultOpConversionPattern<ttnn::CosOp>,
DefaultOpConversionPattern<ttnn::LogOp>>(typeConverter, ctx);

// Eltwise binary ops
//
Expand Down
5 changes: 5 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,8 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
type = ::tt::target::ttnn::EltwiseOpType::Cos;
} else if constexpr (std::is_same_v<EltwiseOp, SinOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Sin;
} else if constexpr (std::is_same_v<EltwiseOp, LogOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Log;
} else {
llvm_unreachable("unhandled EltwiseOp");
}
Expand Down Expand Up @@ -597,6 +599,9 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
if (auto expOp = dyn_cast<ExpOp>(op); expOp) {
return createOperation(cache, createEltwiseOp(cache, expOp), debugString);
}
if (auto logOp = dyn_cast<LogOp>(op); logOp) {
return createOperation(cache, createEltwiseOp(cache, logOp), debugString);
}
if (auto sigmoidOp = dyn_cast<SigmoidOp>(op); sigmoidOp) {
return createOperation(cache, createEltwiseOp(cache, sigmoidOp),
debugString);
Expand Down
4 changes: 4 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary/unary.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryWithFastAndApproximateModeOP(op, tensorPool, ::ttnn::exp);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Log: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::log);
break;
}
default:
throw std::invalid_argument("Unsupported unary operation");
}
Expand Down
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

// Verifies that ttir.log lowers to ttnn.empty + ttnn.log through the backend
// pipeline. Renamed from @sqrt: the original name was a copy-paste leftover
// from the sqrt perf test — this function exercises ttir.log, not ttir.sqrt.
func.func @log(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]]
  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  return %1 : tensor<64x128xf32>
}
8 changes: 8 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -183,3 +183,11 @@ func.func @typecast(%arg0: tensor<64x128xf32>) -> tensor<64x128xbf16> {
%1 = "ttir.typecast"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
return %1 : tensor<64x128xbf16>
}

// Verifies lowering of ttir.log in the eltwise suite.
// NOTE(review): %arg1 is never used — it looks like a copy-paste from a
// binary-op test; kept here only to preserve the existing test signature.
func.func @log(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]]
  // Fixed: the op has two operands (input + DPS output, operandSegmentSizes
  // [1,1]), so operand_constraints must list two entries, not three —
  // matching the sibling test in test_perf_log.mlir.
  %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  return %1 : tensor<64x128xf32>
}

0 comments on commit b75d44d

Please sign in to comment.