Add support for log1p op (#1048)
* Adding log1p

* Adding tests

* Formatting changes

* Added perf tests

* Formatting

* Formatting
ajakovljevicTT authored Nov 1, 2024
1 parent b75d44d commit 9bd5963
Showing 14 changed files with 91 additions and 1 deletion.
12 changes: 12 additions & 0 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -281,6 +281,18 @@ def TTIR_LogOp: TTIR_ElementwiseUnaryOp<"log"> {
}];
}

def TTIR_Log1pOp: TTIR_ElementwiseUnaryOp<"log1p"> {
let summary = "Eltwise log1p operation.";
let description = [{
Performs the element-wise natural logarithm of one plus the input, log(1 + x),
on the `operand` tensor and writes the result to the output tensor.

Example:
%a: [0.0, -0.999, 7.0, 6.38905621, 15.0]
"ttir.logp1"(%a, %out) -> %out: [0.0, -6.90776825, 2.07944155, 2.0, 2.77258873]
}];
}

class TTIR_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
TTIR_ElementwiseOp<mnemonic, traits> {
let summary = "Eltwise binary op.";
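Aside (editor's note, not part of this commit): the example values in the op description can be reproduced with the C++ standard library's log1p. The description's values appear to come from a float32 run, so their last digits differ slightly from the double-precision output below.

#include <cmath>
#include <cstdio>

int main() {
  const double inputs[] = {0.0, -0.999, 7.0, 6.38905621, 15.0};
  for (double x : inputs) {
    // log1p(x) = log(1 + x), computed accurately even for x near zero.
    std::printf("log1p(%g) = %.8f\n", x, std::log1p(x));
  }
  // Prints: 0.00000000, -6.90775528, 2.07944154, 2.00000000, 2.77258872
  return 0;
}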
12 changes: 12 additions & 0 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -244,6 +244,18 @@ def TTNN_LogOp : TTNN_ElementwiseUnaryOp<"log"> {
}];
}

def TTNN_Log1pOp: TTNN_ElementwiseUnaryOp<"log1p"> {
let summary = "Eltwise log1p operation.";
let description = [{
Performs the element-wise natural logarithm of one plus the input, log(1 + x),
on the `operand` tensor and writes the result to the output tensor.

Example:
%a: [0.0, -0.999, 7.0, 6.38905621, 15.0]
"ttnn.logp1"(%a, %out) -> %out: [0.0, -6.90776825, 2.07944155, 2.0, 2.77258873]
}];
}

def TTNN_AddOp : TTNN_ElementwiseBinaryOp<"add"> {
let summary = "Eltwise add.";
let description = [{
3 changes: 2 additions & 1 deletion include/ttmlir/Target/TTNN/program.fbs
@@ -86,7 +86,8 @@ enum EltwiseOpType: uint32 {
Ceil = 25,
Sin = 26,
Cos = 27,
Log = 28
Log = 28,
Log1p = 29,
}

union EltwiseOpParams {
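Aside (editor's note, not part of this commit): flatbuffer enum fields serialize as their underlying integer, which is why the new tag is appended with an explicit value rather than renumbering existing ones. A minimal C++ sketch of the wire-level meaning; the enum here only mirrors the schema and is not the flatc-generated type.

#include <cstdint>
#include <cstdio>

// Illustrative mirror of the schema enum above; the real type is generated
// by flatc from program.fbs.
enum class EltwiseOpType : uint32_t { Log = 28, Log1p = 29 };

int main() {
  // The serialized representation is just the integer value, so existing
  // tags must keep their numbers when new ones are appended.
  std::printf("%u\n", static_cast<unsigned>(EltwiseOpType::Log1p)); // 29
  return 0;
}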
2 changes: 2 additions & 0 deletions lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp
@@ -859,6 +859,8 @@ void addElementwiseUnaryOpsConversionPatterns(MLIRContext *ctx,
mlir::stablehlo::SineOp, mlir::tt::ttir::SinOp>>(typeConverter, ctx);
patterns.add<StableHLOToTTIROpDefaultConversionPattern<
mlir::stablehlo::SqrtOp, mlir::tt::ttir::SqrtOp>>(typeConverter, ctx);
patterns.add<StableHLOToTTIROpDefaultConversionPattern<
mlir::stablehlo::Log1pOp, mlir::tt::ttir::Log1pOp>>(typeConverter, ctx);
}

void addElementwiseBinaryOpsConversionPatterns(MLIRContext *ctx,
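Aside (editor's note, not part of this commit): StableHLOToTTIROpDefaultConversionPattern is a generic one-to-one rewrite, so supporting a new elementwise op only requires registering another instantiation as above. A simplified sketch of the idea follows; the in-tree pattern additionally materializes the destination-style output tensor that TTIR ops take and runs result types through the type converter, both elided here.

#include "mlir/Transforms/DialectConversion.h"

// Simplified one-to-one conversion pattern: replace SrcOp with DestOp,
// forwarding the already-converted operands and the original result types.
template <typename SrcOp, typename DestOp>
struct OneToOneConversionPattern : public mlir::OpConversionPattern<SrcOp> {
  using mlir::OpConversionPattern<SrcOp>::OpConversionPattern;

  mlir::LogicalResult
  matchAndRewrite(SrcOp srcOp, typename SrcOp::Adaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<DestOp>(srcOp, srcOp->getResultTypes(),
                                        adaptor.getOperands());
    return mlir::success();
  }
};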
1 change: 1 addition & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -877,6 +877,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
ElementwiseOpConversionPattern<ttir::SqrtOp, ttnn::SqrtOp>,
ElementwiseOpConversionPattern<ttir::RsqrtOp, ttnn::RsqrtOp>,
ElementwiseOpConversionPattern<ttir::SigmoidOp, ttnn::SigmoidOp>,
ElementwiseOpConversionPattern<ttir::Log1pOp, ttnn::Log1pOp>,
ElementwiseOpConversionPattern<ttir::ReciprocalOp, ttnn::ReciprocalOp>,
ElementwiseOpConversionPattern<ttir::ExpOp, ttnn::ExpOp>,
ElementwiseOpConversionPattern<ttir::LogOp, ttnn::LogOp>,
1 change: 1 addition & 0 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -622,6 +622,7 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
DefaultOpConversionPattern<ttnn::SqrtOp>,
DefaultOpConversionPattern<ttnn::RsqrtOp>,
DefaultOpConversionPattern<ttnn::SigmoidOp>,
DefaultOpConversionPattern<ttnn::Log1pOp>,
DefaultOpConversionPattern<ttnn::ReciprocalOp>,
DefaultOpConversionPattern<ttnn::ExpOp>,
DefaultOpConversionPattern<ttnn::CeilOp>,
5 changes: 5 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -343,6 +343,8 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
type = ::tt::target::ttnn::EltwiseOpType::Div;
} else if constexpr (std::is_same_v<EltwiseOp, SigmoidOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Sigmoid;
} else if constexpr (std::is_same_v<EltwiseOp, Log1pOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Log1p;
} else if constexpr (std::is_same_v<EltwiseOp, ExpOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Exp;
} else if constexpr (std::is_same_v<EltwiseOp, CeilOp>) {
@@ -606,6 +608,9 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
return createOperation(cache, createEltwiseOp(cache, sigmoidOp),
debugString);
}
if (auto log1pOp = dyn_cast<Log1pOp>(op); log1pOp) {
return createOperation(cache, createEltwiseOp(cache, log1pOp), debugString);
}
if (auto reciprocalOp = dyn_cast<ReciprocalOp>(op); reciprocalOp) {
return createOperation(cache, createEltwiseOp(cache, reciprocalOp),
debugString);
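Aside (editor's note, not part of this commit): the createEltwiseOp chain above relies on C++17 `if constexpr` with `std::is_same_v`, which resolves the op-type-to-enum mapping at compile time. A self-contained illustration of the idiom, with hypothetical stand-in types:

#include <cstdio>
#include <type_traits>

struct Log1pOp {};
struct ExpOp {};

// Only the branch whose condition holds is instantiated for a given Op,
// so each template instantiation compiles down to a single return.
template <typename Op>
const char *eltwiseTypeName() {
  if constexpr (std::is_same_v<Op, Log1pOp>) {
    return "Log1p";
  } else if constexpr (std::is_same_v<Op, ExpOp>) {
    return "Exp";
  } else {
    return "Unknown";
  }
}

int main() {
  std::printf("%s\n", eltwiseTypeName<Log1pOp>()); // prints Log1p
  return 0;
}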
2 changes: 2 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary/unary.cpp
@@ -86,10 +86,12 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::sigmoid);
break;
}

case ::tt::target::ttnn::EltwiseOpType::Sin: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::sin);
break;
}

case ::tt::target::ttnn::EltwiseOpType::Reciprocal: {
runEltwiseUnaryOP(op, tensorPool, ::ttnn::reciprocal);
break;
4 changes: 4 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary/unary_composite.cpp
@@ -33,6 +33,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseUnaryCompositeOP(op, tensorPool, ::ttnn::cbrt);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Log1p: {
runEltwiseUnaryCompositeOP(op, tensorPool, ::ttnn::log1p);
break;
}
default:
throw std::invalid_argument(
"Unsupported Eltwise Binary Composite operation");
2 changes: 2 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary/unary_composite.h
@@ -14,6 +14,8 @@ inline bool isUnaryCompositeOp(const ::tt::target::ttnn::EltwiseOp *op) {
switch (op->type()) {
case ::tt::target::ttnn::EltwiseOpType::Cbrt:
return true;
case ::tt::target::ttnn::EltwiseOpType::Log1p:
return true;
default:
return false;
}
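Aside (editor's note, not part of this commit): log1p is routed through the composite path because ttnn implements it by composing primitive eltwise kernels rather than as a single unary kernel. A rough, self-contained sketch of that idea, assuming a composition equivalent to log(1 + x); the real ::ttnn::log1p kernel may use a fused or numerically safer formulation near x = 0.

#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

// Stand-ins for primitive eltwise kernels such as ::ttnn::add and ::ttnn::log.
std::vector<float> eltwiseAddScalar(std::vector<float> t, float s) {
  for (float &v : t)
    v += s;
  return t;
}

std::vector<float> eltwiseLog(std::vector<float> t) {
  for (float &v : t)
    v = std::log(v);
  return t;
}

// A composite op chains primitives instead of invoking one device kernel.
std::vector<float> compositeLog1p(std::vector<float> t) {
  return eltwiseLog(eltwiseAddScalar(std::move(t), 1.0f));
}

int main() {
  for (float v : compositeLog1p({0.0f, 7.0f, 15.0f}))
    std::printf("%g\n", v); // 0, 2.07944, 2.77259
  return 0;
}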
12 changes: 12 additions & 0 deletions test/ttmlir/Conversion/StableHLOToTTIR/log_plus_one_op.mlir
@@ -0,0 +1,12 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module @jit_eltwise_log_plus_one attributes {} {
func.func public @test_log_plus_one(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = stablehlo.log_plus_one %arg0 : tensor<13x21x3xf32>
// CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
// CHECK: [[VAL1:%[0-9]+]] = "ttir.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
return %0 : tensor<13x21x3xf32>
// CHECK: return [[VAL1]] : [[TENSOR_SIZE]]
}
}
12 changes: 12 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir
@@ -0,0 +1,12 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
}
15 changes: 15 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_log1p.mlir
@@ -0,0 +1,15 @@

// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
9 changes: 9 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_eltwise.mlir
@@ -191,3 +191,12 @@ func.func @log(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
%1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
return %1 : tensor<64x128xf32>
}

func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
%0 = tensor.empty() : tensor<64x128xf32>
// CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
%1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
// CHECK: %{{[0-9]+}} = "ttnn.log1p"(%{{[0-9]+}}, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
return %1 : tensor<64x128xf32>
// CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
}
