Changes for llvm pulldown 11/24 (#967)
Garra1980 authored Nov 21, 2024
1 parent 926ecbd commit 2b8422d
Showing 32 changed files with 184 additions and 178 deletions.
2 changes: 1 addition & 1 deletion build_tools/llvm_version.txt
@@ -1 +1 @@
-add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
+012dd8be4b5a4c00deb22345c630990f160b3aa3
@@ -1,7 +1,7 @@
-From 45b150c9a0c4e4bd60c153e5142da17fd6cde6da Mon Sep 17 00:00:00 2001
-From: izamyati <[email protected]>
-Date: Tue, 24 Sep 2024 17:42:02 -0500
-Subject: [PATCH] Add support for VectorAnyINTEL capability
+From 6377f33cad48947728d2049e94aca8a567357017 Mon Sep 17 00:00:00 2001
+From: Garra1980 <[email protected]>
+Date: Mon, 18 Nov 2024 19:45:00 +0100
+Subject: [PATCH 1/1] Add support for VectorAnyINTEL capability

---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 9 +-
@@ -24,10 +24,10 @@ Subject: [PATCH] Add support for VectorAnyINTEL capability
17 files changed, 316 insertions(+), 65 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
-index 3b7da9b44a08..ddaeb13ef253 100644
+index 27c82811aa00..18f481e52602 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
-@@ -4142,7 +4142,12 @@ def SPIRV_Int32 : TypeAlias<I32, "Int32">;
+@@ -4188,7 +4188,12 @@ def SPIRV_Int32 : TypeAlias<I32, "Int32">;
def SPIRV_Float32 : TypeAlias<F32, "Float32">;
def SPIRV_Float : FloatOfWidths<[16, 32, 64]>;
def SPIRV_Float16or32 : FloatOfWidths<[16, 32]>;
@@ -41,7 +41,7 @@ index 3b7da9b44a08..ddaeb13ef253 100644
[SPIRV_Bool, SPIRV_Integer, SPIRV_Float]>;
// Component type check is done in the type parser for the following SPIR-V
// dialect-specific types so we use "Any" here.
-@@ -4185,7 +4190,7 @@ class SPIRV_CoopMatrixOfType<list<Type> allowedTypes> :
+@@ -4231,7 +4236,7 @@ class SPIRV_CoopMatrixOfType<list<Type> allowedTypes> :
"Cooperative Matrix">;

class SPIRV_VectorOf<Type type> :
@@ -51,10 +51,10 @@ index 3b7da9b44a08..ddaeb13ef253 100644
class SPIRV_ScalarOrVectorOf<Type type> :
AnyTypeOf<[type, SPIRV_VectorOf<type>]>;
diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td
-index 211385245555..671ec270efe0 100644
+index 48e4c24f8386..677074986d2d 100644
--- a/mlir/include/mlir/IR/CommonTypeConstraints.td
+++ b/mlir/include/mlir/IR/CommonTypeConstraints.td
-@@ -637,6 +637,92 @@ class ScalableVectorOfRankAndLengthAndType<list<int> allowedRanks,
+@@ -639,6 +639,92 @@ class ScalableVectorOfRankAndLengthAndType<list<int> allowedRanks,
ScalableVectorOfLength<allowedLengths>.summary,
"::mlir::VectorType">;

@@ -209,7 +209,7 @@ index 337df3a5a65f..542c6beba2e4 100644
capabilities.push_back(ref);
}
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
-index d833ec9309ba..36840582a114 100644
+index f5700059f68e..915d1b0124f9 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -88,9 +88,13 @@ static std::optional<SmallVector<int64_t>> getTargetShape(VectorType vecType) {
@@ -539,7 +539,7 @@ index 3683e5b469b1..a95a6001fd20 100644
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
-index 53a1015de75b..6970b8ec0628 100644
+index 6dd0353d9374..76b7110f0731 100644
--- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
@@ -21,7 +21,7 @@ spirv.func @f32_to_bf16_vec(%arg0 : vector<2xf32>) "None" {
@@ -574,7 +574,7 @@ index 5c24f0e6a7d3..3ca61ab48096 100644
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
-index d8a26c71d12f..d22378817dbb 100644
+index 60ae1584d29f..bc366c0e3a09 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -495,7 +495,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}}
-%0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i1
+%0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -516,7 +516,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}}
-%0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i1
+%0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -537,7 +537,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}}
-%0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i1
+%0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -558,7 +558,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}}
-%0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i32
+%0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -579,7 +579,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}}
-%0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i32
+%0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -600,7 +600,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}}
-%0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i32
+%0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
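A note on the updated diagnostics above: 9223372036854775807 is std::numeric_limits<int64_t>::max(), i.e. 2^63 - 1. With VectorAnyINTEL the patch lifts SPIR-V's usual 2/3/4/8/16 vector-length restriction, so the verifier's upper bound becomes the largest representable int64_t; the tests also pick up upstream's newer assembly for these ops (<Workgroup> <Reduce> enum syntax with an explicit result type). A minimal standalone check of the constant:

    #include <cstdint>
    #include <limits>

    // 9223372036854775807 == 2^63 - 1, the int64_t maximum that now serves as
    // the vector-length upper bound in the expected-error strings above.
    static_assert(std::numeric_limits<int64_t>::max() == 9223372036854775807LL,
                  "diagnostic bound should be int64_t max");

    int main() { return 0; }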
diff --git a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
-index 81ba471d3f51..7a29abd44b34 100644
+index 8f021ed3d663..21558b9607f8 100644
--- a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
@@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () {
@@ -1,17 +1,17 @@
-From 995779b01d0f50be5729eafc0198d777e9c82c8d Mon Sep 17 00:00:00 2001
-From: Chao Chen <[email protected]>
-Date: Fri, 26 Apr 2024 20:59:53 +0000
-Subject: [PATCH 7/7] Add-memref.extract_aligned_pointer_as_index-to-spirv
+From 9b5db3d72169b878e1f607cd6dd530c72849028b Mon Sep 17 00:00:00 2001
+From: izamyati <[email protected]>
+Date: Fri, 25 Oct 2024 16:00:54 -0500
+Subject: [PATCH 1/1] Add memref.extract aligned pointer as index-to-spirv

---
.../MemRefToSPIRV/MemRefToSPIRV.cpp | 37 ++++++++++++++++---
1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
-index 81b9f55cac80..0db46e6c2987 100644
+index 49a391938eaf..60b8f379dfc1 100644
--- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
+++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
-@@ -308,6 +308,18 @@ public:
+@@ -307,6 +307,18 @@ public:
}
};

@@ -30,7 +30,7 @@ index 81b9f55cac80..0db46e6c2987 100644
} // namespace

//===----------------------------------------------------------------------===//
-@@ -922,6 +934,20 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite(
+@@ -921,6 +933,20 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite(
return success();
}

@@ -51,9 +51,9 @@ index 81b9f55cac80..0db46e6c2987 100644
//===----------------------------------------------------------------------===//
// Pattern population
//===----------------------------------------------------------------------===//
-@@ -929,10 +955,11 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite(
+@@ -928,10 +954,11 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite(
namespace mlir {
-void populateMemRefToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
+void populateMemRefToSPIRVPatterns(const SPIRVTypeConverter &typeConverter,
RewritePatternSet &patterns) {
- patterns.add<AllocaOpPattern, AllocOpPattern, AtomicRMWOpPattern,
- DeallocOpPattern, IntLoadOpPattern, IntStoreOpPattern,
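For orientation, the registration change in the last hunk has roughly this shape once expanded. This is a sketch, not the verbatim patch: the pattern list is abbreviated to the names visible above, and ExtractAlignedPointerAsIndexOpPattern is an assumed name for the pattern this patch adds (its definition is collapsed in this view). The const on the type converter mirrors the upstream MLIR change that made populate* functions take the converter by const reference.

    // Sketch only; ExtractAlignedPointerAsIndexOpPattern is an assumed name.
    void populateMemRefToSPIRVPatterns(const SPIRVTypeConverter &typeConverter,
                                       RewritePatternSet &patterns) {
      patterns.add<AllocaOpPattern, AllocOpPattern, AtomicRMWOpPattern,
                   DeallocOpPattern, IntLoadOpPattern, IntStoreOpPattern,
                   ExtractAlignedPointerAsIndexOpPattern /*, ...*/>(
          typeConverter, patterns.getContext());
    }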
@@ -1,16 +1,17 @@
-From 0829723718f1e80834d9d0051069e263fcfea82a Mon Sep 17 00:00:00 2001
+From 910b158b0e1c97a8e75906086c7dce25a63afa50 Mon Sep 17 00:00:00 2001
From: izamyati <[email protected]>
-Date: Tue, 24 Sep 2024 18:25:53 -0500
-Subject: [PATCH] xegpu temporary downstream defintion changes
+Date: Fri, 25 Oct 2024 16:11:59 -0500
+Subject: [PATCH 1/1] xegpu temporary downstream defintion changes and
+ vectortoxegpu patch

---
mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td | 6 ++++++
-mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp | 1 +
+mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp | 2 ++
mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp | 2 +-
-3 files changed, 8 insertions(+), 1 deletion(-)
+3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
-index e24a056de2ca..948cc40e8595 100644
+index 239ce0aa8e00..812d2d167297 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
@@ -302,6 +302,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [AllElementTypesMatch<["value", "Tensor
@@ -21,7 +22,7 @@ index e24a056de2ca..948cc40e8595 100644
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
-@@ -850,4 +851,9 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> {
+@@ -871,4 +872,9 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> {
let extraClassDeclaration = extraBaseClassDeclaration;
}

@@ -32,22 +33,30 @@ index e24a056de2ca..948cc40e8595 100644
+
#endif // MLIR_DIALECT_XEGPU_IR_XEGPUOPS_TD
diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
-index fa0344276553..849de4fced8f 100644
+index 215e1b1b8745..c05a9e2b86f5 100644
--- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
+++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
-@@ -184,6 +184,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
+@@ -199,6 +199,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
xegpu::CachePolicyAttr hint = nullptr;
auto loadOp = rewriter.create<xegpu::LoadNdOp>(
loc, vecTy, ndDesc, /*packed=*/nullptr, transposeAttr,
+ /*transpose_bit_width*/nullptr,
/*l1_hint=*/hint,
/*l2_hint=*/hint, /*l3_hint=*/hint);
rewriter.replaceOp(readOp, loadOp);
@@ -265,6 +266,7 @@ struct LoadLowering : public OpRewritePattern<vector::LoadOp> {
xegpu::CachePolicyAttr hint = nullptr;
auto loadNdOp = rewriter.create<xegpu::LoadNdOp>(
loc, vecTy, ndDesc, /*packed=*/nullptr, /*transpose=*/nullptr,
+ /*transpose_bit_width*/nullptr,
/*l1_hint=*/hint,
/*l2_hint=*/hint, /*l3_hint=*/hint);
rewriter.replaceOp(loadOp, loadNdOp);
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
-index 1a7a6b347840..121a7007208b 100644
+index 5bd3c370e385..898e8564e3fe 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
-@@ -236,7 +236,7 @@ LogicalResult LoadNdOp::verify() {
+@@ -237,7 +237,7 @@ LogicalResult LoadNdOp::verify() {
emitWarning("Invalid transpose attr. It is ignored.");
}

4 changes: 2 additions & 2 deletions include/imex/Utils/XeCommon.h
@@ -415,10 +415,10 @@ template <typename OpType> unsigned encodeCacheHint(OpType op) {
}
return cacheHint;
}
-class XeTypeConverter : public mlir::OneToNTypeConverter {
+class XeTypeConverter : public mlir::TypeConverter {
public:
// friend class XeConversionPattern;
-using mlir::OneToNTypeConverter::convertType;
+using mlir::TypeConverter::convertType;

XeTypeConverter(mlir::MLIRContext &context) {
addConversion([&](xetile::TileType tileTy,
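Context for the XeTypeConverter change above: upstream MLIR folded OneToNTypeConverter's one-to-many (1:N) conversion support into the base mlir::TypeConverter, so deriving from TypeConverter directly now covers the old uses. A minimal sketch of the 1:N registration form under that merged API (the identity expansion below is only a placeholder body):

    // 1:N conversion callback: one input type may expand to several result
    // types, appended to resultTypes.
    mlir::TypeConverter converter;
    converter.addConversion(
        [](xetile::TileType tileTy,
           llvm::SmallVectorImpl<mlir::Type> &resultTypes)
            -> std::optional<mlir::LogicalResult> {
          resultTypes.push_back(tileTy); // placeholder: 1:1 identity expansion
          return mlir::success();
        });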
3 changes: 1 addition & 2 deletions lib/Conversion/DistToStandard/DistToStandard.cpp
@@ -1709,8 +1709,7 @@ struct ConvertDistToStandardPass

auto materializeArray =
[&](::mlir::OpBuilder &builder, ::imex::ndarray::NDArrayType type,
-::mlir::ValueRange inputs,
-::mlir::Location loc) -> std::optional<::mlir::Value> {
+::mlir::ValueRange inputs, ::mlir::Location loc) -> ::mlir::Value {
assert(inputs.size() == 1);
auto input = inputs[0];
auto itype = input.getType();
7 changes: 3 additions & 4 deletions lib/Conversion/NDArrayToLinalg/NDArrayToLinalg.cpp
@@ -1281,10 +1281,9 @@ struct ConvertNDArrayToLinalgPass
typeConverter.addConversion(convT2T);
typeConverter.addConversion(convNDArray2RankedTensor);

-auto materializeCast =
-[](::mlir::OpBuilder &builder, ::mlir::Type type,
-::mlir::ValueRange inputs,
-::mlir::Location loc) -> std::optional<::mlir::Value> {
+auto materializeCast = [](::mlir::OpBuilder &builder, ::mlir::Type type,
+::mlir::ValueRange inputs,
+::mlir::Location loc) -> ::mlir::Value {
if (inputs.size() == 1) {
auto input = inputs[0];
auto itype = input.getType();
32 changes: 15 additions & 17 deletions lib/Conversion/XeTileToXeGPU/XeTileToXeGPUConversion.cpp
@@ -89,23 +89,21 @@ XeOneToNTypeConverter::XeOneToNTypeConverter(mlir::MLIRContext &context)
addConversion(
[&](mlir::MemRefType type) -> std::optional<mlir::Type> { return type; });

-addArgumentMaterialization(
-[&](mlir::OpBuilder &builder, mlir::Type resultType,
-mlir::ValueRange inputs,
-mlir::Location loc) -> std::optional<mlir::Value> {
-return builder
-.create<mlir::UnrealizedConversionCastOp>(loc, resultType, inputs)
-.getResult(0);
-});
-
-addSourceMaterialization(
-[&](mlir::OpBuilder &builder, mlir::Type resultType,
-mlir::ValueRange inputs,
-mlir::Location loc) -> std::optional<mlir::Value> {
-return builder
-.create<mlir::UnrealizedConversionCastOp>(loc, resultType, inputs)
-.getResult(0);
-});
+addArgumentMaterialization([&](mlir::OpBuilder &builder,
+mlir::Type resultType, mlir::ValueRange inputs,
+mlir::Location loc) -> mlir::Value {
+return builder
+.create<mlir::UnrealizedConversionCastOp>(loc, resultType, inputs)
+.getResult(0);
+});
+
+addSourceMaterialization([&](mlir::OpBuilder &builder, mlir::Type resultType,
+mlir::ValueRange inputs,
+mlir::Location loc) -> mlir::Value {
+return builder
+.create<mlir::UnrealizedConversionCastOp>(loc, resultType, inputs)
+.getResult(0);
+});
}

std::optional<mlir::LogicalResult> XeOneToNTypeConverter::convertTileType(
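The same API shift explains the materialization edits in this file and in the neighboring passes (DistToStandard, NDArrayToLinalg, Canonicalization, OptimizeTranspose): TypeConverter materialization callbacks now return mlir::Value instead of std::optional<mlir::Value>, with a null Value signaling failure. A hedged sketch of the updated shape (the empty-input check is illustrative only):

    // Materializations now return mlir::Value; a null Value (rather than
    // std::nullopt) reports failure to materialize.
    typeConverter.addSourceMaterialization(
        [](mlir::OpBuilder &builder, mlir::Type resultType,
           mlir::ValueRange inputs, mlir::Location loc) -> mlir::Value {
          if (inputs.empty())
            return mlir::Value(); // null Value == failure
          return builder
              .create<mlir::UnrealizedConversionCastOp>(loc, resultType, inputs)
              .getResult(0);
        });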
2 changes: 1 addition & 1 deletion lib/Dialect/XeTile/Transforms/Canonicalization.cpp
@@ -396,7 +396,7 @@ struct XeTileCanonicalizationPass final
mlir::ValueRange inputs, mlir::Location loc) {
auto cast =
builder.create<mlir::UnrealizedConversionCastOp>(loc, type, inputs);
-return std::optional<mlir::Value>(cast.getResult(0));
+return cast.getResult(0);
};
typeConverter.addConversion([](mlir::Type type) { return type; });
typeConverter.addConversion([](imex::xetile::TileType tileTy) {
1 change: 1 addition & 0 deletions lib/Transforms/CMakeLists.txt
@@ -20,6 +20,7 @@ add_mlir_library(IMEXTransforms

LINK_LIBS PUBLIC
MLIRFuncDialect
+MLIRCopyOpInterface
MLIRGPUDialect
MLIRPass
MLIRSCFDialect
2 changes: 1 addition & 1 deletion lib/Transforms/OptimizeTranspose.cpp
@@ -969,7 +969,7 @@ struct OptimizeTransposePass final
auto addNToOneCast = [](OpBuilder &builder, Type type, ValueRange inputs,
Location loc) {
auto cast = builder.create<UnrealizedConversionCastOp>(loc, type, inputs);
-return std::optional<Value>(cast.getResult(0));
+return cast.getResult(0);
};
typeConverter.addSourceMaterialization(addNToOneCast);
typeConverter.addArgumentMaterialization(addNToOneCast);