From 2b8422d1e1a9267f6f7e8d725caee1c1f6111ee8 Mon Sep 17 00:00:00 2001 From: Igor Zamyatin Date: Thu, 21 Nov 2024 16:48:43 -0600 Subject: [PATCH] Changes for llvm pulldown 11/24 (#967) --- build_tools/llvm_version.txt | 2 +- ...upport-for-VectorAnyINTEL-capability.patch | 38 +++++++++---------- ...t-aligned-pointer-as-index-to-spirv.patch} | 18 ++++----- ...ownstream-defintion-changes-and-vec.patch} | 31 +++++++++------ include/imex/Utils/XeCommon.h | 4 +- .../DistToStandard/DistToStandard.cpp | 3 +- .../NDArrayToLinalg/NDArrayToLinalg.cpp | 7 ++-- .../XeTileToXeGPU/XeTileToXeGPUConversion.cpp | 32 ++++++++-------- .../XeTile/Transforms/Canonicalization.cpp | 2 +- lib/Transforms/CMakeLists.txt | 1 + lib/Transforms/OptimizeTranspose.cpp | 2 +- test/SPIRV/CppEdsl.Convolution_BF16.mlir | 16 ++++---- test/SPIRV/OpTest.ArgMax_BF16.mlir | 18 ++++----- test/SPIRV/OpTest.Argmax_FLOAT32.mlir | 18 ++++----- test/SPIRV/OpTest.BroadcastNonNumpy_BF16.mlir | 4 +- .../OpTest.BroadcastNonNumpy_FLOAT32.mlir | 6 +-- test/SPIRV/OpTest.Conv2D_FLOAT32.mlir | 16 ++++---- test/SPIRV/OpTest.EltwiseAdd_BF16.mlir | 6 +-- test/SPIRV/OpTest.EltwiseAdd_FLOAT32.mlir | 6 +-- ...twiseAdd_FLOAT32_explicit_memory_copy.mlir | 6 +-- .../SPIRV/OpTest.ExplicitPadding_FLOAT32.mlir | 8 ++-- test/SPIRV/OpTest.GEMM_BF16.mlir | 8 ++-- test/SPIRV/OpTest.GEMM_BF16_ACC_F32.mlir | 8 ++-- test/SPIRV/OpTest.GEMM_F16_ACC_F32.mlir | 8 ++-- test/SPIRV/OpTest.MaxPool1D_INT64.mlir | 8 ++-- test/SPIRV/OpTest.Quantize_FLOAT32.mlir | 8 ++-- test/SPIRV/OpTest.Relu_FLOAT32.mlir | 10 ++--- test/SPIRV/OpTest.SlmDynamic.mlir | 16 ++++---- test/SPIRV/OpTest.Softmax_FLOAT32.mlir | 32 ++++++++-------- test/SPIRV/OpTest.Sum_FLOAT32.mlir | 6 +-- test/SPIRV/OpTest.Transpose_FLOAT32.mlir | 6 +-- test/SPIRV/relu.slm.static.8x32.mlir | 8 ++-- 32 files changed, 184 insertions(+), 178 deletions(-) rename build_tools/patches/{0005-Add-memref.extract_aligned_pointer_as_index-to-spirv.patch => 0005-Add-memref.extract-aligned-pointer-as-index-to-spirv.patch} (84%) rename build_tools/patches/{0008-xegpu-temporary-downstream-defintion-changes.patch => 0008-xegpu-temporary-downstream-defintion-changes-and-vec.patch} (68%) diff --git a/build_tools/llvm_version.txt b/build_tools/llvm_version.txt index 33000613b..ffba62e22 100644 --- a/build_tools/llvm_version.txt +++ b/build_tools/llvm_version.txt @@ -1 +1 @@ -add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a +012dd8be4b5a4c00deb22345c630990f160b3aa3 diff --git a/build_tools/patches/0001-Add-support-for-VectorAnyINTEL-capability.patch b/build_tools/patches/0001-Add-support-for-VectorAnyINTEL-capability.patch index 531b66a3e..58e52ddc9 100644 --- a/build_tools/patches/0001-Add-support-for-VectorAnyINTEL-capability.patch +++ b/build_tools/patches/0001-Add-support-for-VectorAnyINTEL-capability.patch @@ -1,7 +1,7 @@ -From 45b150c9a0c4e4bd60c153e5142da17fd6cde6da Mon Sep 17 00:00:00 2001 -From: izamyati -Date: Tue, 24 Sep 2024 17:42:02 -0500 -Subject: [PATCH] Add support for VectorAnyINTEL capability +From 6377f33cad48947728d2049e94aca8a567357017 Mon Sep 17 00:00:00 2001 +From: Garra1980 +Date: Mon, 18 Nov 2024 19:45:00 +0100 +Subject: [PATCH 1/1] Add support for VectorAnyINTEL capability --- .../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 9 +- @@ -24,10 +24,10 @@ Subject: [PATCH] Add support for VectorAnyINTEL capability 17 files changed, 316 insertions(+), 65 deletions(-) diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td -index 3b7da9b44a08..ddaeb13ef253 100644 +index 
27c82811aa00..18f481e52602 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td -@@ -4142,7 +4142,12 @@ def SPIRV_Int32 : TypeAlias; +@@ -4188,7 +4188,12 @@ def SPIRV_Int32 : TypeAlias; def SPIRV_Float32 : TypeAlias; def SPIRV_Float : FloatOfWidths<[16, 32, 64]>; def SPIRV_Float16or32 : FloatOfWidths<[16, 32]>; @@ -41,7 +41,7 @@ index 3b7da9b44a08..ddaeb13ef253 100644 [SPIRV_Bool, SPIRV_Integer, SPIRV_Float]>; // Component type check is done in the type parser for the following SPIR-V // dialect-specific types so we use "Any" here. -@@ -4185,7 +4190,7 @@ class SPIRV_CoopMatrixOfType allowedTypes> : +@@ -4231,7 +4236,7 @@ class SPIRV_CoopMatrixOfType allowedTypes> : "Cooperative Matrix">; class SPIRV_VectorOf : @@ -51,10 +51,10 @@ index 3b7da9b44a08..ddaeb13ef253 100644 class SPIRV_ScalarOrVectorOf : AnyTypeOf<[type, SPIRV_VectorOf]>; diff --git a/mlir/include/mlir/IR/CommonTypeConstraints.td b/mlir/include/mlir/IR/CommonTypeConstraints.td -index 211385245555..671ec270efe0 100644 +index 48e4c24f8386..677074986d2d 100644 --- a/mlir/include/mlir/IR/CommonTypeConstraints.td +++ b/mlir/include/mlir/IR/CommonTypeConstraints.td -@@ -637,6 +637,92 @@ class ScalableVectorOfRankAndLengthAndType allowedRanks, +@@ -639,6 +639,92 @@ class ScalableVectorOfRankAndLengthAndType allowedRanks, ScalableVectorOfLength.summary, "::mlir::VectorType">; @@ -209,7 +209,7 @@ index 337df3a5a65f..542c6beba2e4 100644 capabilities.push_back(ref); } diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp -index d833ec9309ba..36840582a114 100644 +index f5700059f68e..915d1b0124f9 100644 --- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp @@ -88,9 +88,13 @@ static std::optional> getTargetShape(VectorType vecType) { @@ -539,7 +539,7 @@ index 3683e5b469b1..a95a6001fd20 100644 return } diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir -index 53a1015de75b..6970b8ec0628 100644 +index 6dd0353d9374..76b7110f0731 100644 --- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir @@ -21,7 +21,7 @@ spirv.func @f32_to_bf16_vec(%arg0 : vector<2xf32>) "None" { @@ -574,7 +574,7 @@ index 5c24f0e6a7d3..3ca61ab48096 100644 return } diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir -index d8a26c71d12f..d22378817dbb 100644 +index 60ae1584d29f..bc366c0e3a09 100644 --- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir @@ -495,7 +495,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 { @@ -583,7 +583,7 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 { - // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}} + // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}} - %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i1 + %0 = spirv.GroupNonUniformBitwiseAnd %val : i1 -> i1 return %0: i1 } @@ -516,7 +516,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 { @@ -592,7 +592,7 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_bitwise_or(%val: 
i1) -> i1 { - // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}} + // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}} - %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i1 + %0 = spirv.GroupNonUniformBitwiseOr %val : i1 -> i1 return %0: i1 } @@ -537,7 +537,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 { @@ -601,7 +601,7 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 { - // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}} + // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2-9223372036854775807, but got 'i1'}} - %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i1 + %0 = spirv.GroupNonUniformBitwiseXor %val : i1 -> i1 return %0: i1 } @@ -558,7 +558,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 { @@ -610,7 +610,7 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_logical_and(%val: i32) -> i32 { - // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}} + // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}} - %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i32 + %0 = spirv.GroupNonUniformLogicalAnd %val : i32 -> i32 return %0: i32 } @@ -579,7 +579,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 { @@ -619,7 +619,7 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_logical_or(%val: i32) -> i32 { - // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}} + // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}} - %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i32 + %0 = spirv.GroupNonUniformLogicalOr %val : i32 -> i32 return %0: i32 } @@ -600,7 +600,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 { @@ -628,11 +628,11 @@ index d8a26c71d12f..d22378817dbb 100644 func.func @group_non_uniform_logical_xor(%val: i32) -> i32 { - // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}} + // expected-error @+1 {{op operand #0 must be bool or vector of bool values of length 2-9223372036854775807, but got 'i32'}} - %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i32 + %0 = spirv.GroupNonUniformLogicalXor %val : i32 -> i32 return %0: i32 } diff --git a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir -index 81ba471d3f51..7a29abd44b34 100644 +index 8f021ed3d663..21558b9607f8 100644 --- a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir +++ b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir @@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () { diff --git a/build_tools/patches/0005-Add-memref.extract_aligned_pointer_as_index-to-spirv.patch b/build_tools/patches/0005-Add-memref.extract-aligned-pointer-as-index-to-spirv.patch similarity index 84% rename from build_tools/patches/0005-Add-memref.extract_aligned_pointer_as_index-to-spirv.patch rename to 
build_tools/patches/0005-Add-memref.extract-aligned-pointer-as-index-to-spirv.patch index 37f0bc4a9..4ed9241df 100644 --- a/build_tools/patches/0005-Add-memref.extract_aligned_pointer_as_index-to-spirv.patch +++ b/build_tools/patches/0005-Add-memref.extract-aligned-pointer-as-index-to-spirv.patch @@ -1,17 +1,17 @@ -From 995779b01d0f50be5729eafc0198d777e9c82c8d Mon Sep 17 00:00:00 2001 -From: Chao Chen -Date: Fri, 26 Apr 2024 20:59:53 +0000 -Subject: [PATCH 7/7] Add-memref.extract_aligned_pointer_as_index-to-spirv +From 9b5db3d72169b878e1f607cd6dd530c72849028b Mon Sep 17 00:00:00 2001 +From: izamyati +Date: Fri, 25 Oct 2024 16:00:54 -0500 +Subject: [PATCH 1/1] Add memref.extract aligned pointer as index-to-spirv --- .../MemRefToSPIRV/MemRefToSPIRV.cpp | 37 ++++++++++++++++--- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp -index 81b9f55cac80..0db46e6c2987 100644 +index 49a391938eaf..60b8f379dfc1 100644 --- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp +++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp -@@ -308,6 +308,18 @@ public: +@@ -307,6 +307,18 @@ public: } }; @@ -30,7 +30,7 @@ index 81b9f55cac80..0db46e6c2987 100644 } // namespace //===----------------------------------------------------------------------===// -@@ -922,6 +934,20 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite( +@@ -921,6 +933,20 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite( return success(); } @@ -51,9 +51,9 @@ index 81b9f55cac80..0db46e6c2987 100644 //===----------------------------------------------------------------------===// // Pattern population //===----------------------------------------------------------------------===// -@@ -929,10 +955,11 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite( +@@ -928,10 +954,11 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite( namespace mlir { - void populateMemRefToSPIRVPatterns(SPIRVTypeConverter &typeConverter, + void populateMemRefToSPIRVPatterns(const SPIRVTypeConverter &typeConverter, RewritePatternSet &patterns) { - patterns.add -Date: Tue, 24 Sep 2024 18:25:53 -0500 -Subject: [PATCH] xegpu temporary downstream defintion changes +Date: Fri, 25 Oct 2024 16:11:59 -0500 +Subject: [PATCH 1/1] xegpu temporary downstream defintion changes and + vectortoxegpu patch --- mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td | 6 ++++++ - mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp | 1 + + mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp | 2 ++ mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp | 2 +- - 3 files changed, 8 insertions(+), 1 deletion(-) + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td -index e24a056de2ca..948cc40e8595 100644 +index 239ce0aa8e00..812d2d167297 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td @@ -302,6 +302,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [AllElementTypesMatch<["value", "Tensor @@ -21,7 +22,7 @@ index e24a056de2ca..948cc40e8595 100644 OptionalAttr: $l1_hint, OptionalAttr: $l2_hint, OptionalAttr: $l3_hint); -@@ -850,4 +851,9 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> { +@@ -871,4 +872,9 @@ def XeGPU_FenceOp: XeGPU_Op<"fence", []> { let extraClassDeclaration = extraBaseClassDeclaration; } @@ -32,10 +33,10 @@ index e24a056de2ca..948cc40e8595 100644 + #endif // MLIR_DIALECT_XEGPU_IR_XEGPUOPS_TD diff --git 
a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp -index fa0344276553..849de4fced8f 100644 +index 215e1b1b8745..c05a9e2b86f5 100644 --- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp +++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp -@@ -184,6 +184,7 @@ struct TransferReadLowering : public OpRewritePattern { +@@ -199,6 +199,7 @@ struct TransferReadLowering : public OpRewritePattern { xegpu::CachePolicyAttr hint = nullptr; auto loadOp = rewriter.create( loc, vecTy, ndDesc, /*packed=*/nullptr, transposeAttr, @@ -43,11 +44,19 @@ index fa0344276553..849de4fced8f 100644 /*l1_hint=*/hint, /*l2_hint=*/hint, /*l3_hint=*/hint); rewriter.replaceOp(readOp, loadOp); +@@ -265,6 +266,7 @@ struct LoadLowering : public OpRewritePattern { + xegpu::CachePolicyAttr hint = nullptr; + auto loadNdOp = rewriter.create( + loc, vecTy, ndDesc, /*packed=*/nullptr, /*transpose=*/nullptr, ++ /*transpose_bit_width*/nullptr, + /*l1_hint=*/hint, + /*l2_hint=*/hint, /*l3_hint=*/hint); + rewriter.replaceOp(loadOp, loadNdOp); diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp -index 1a7a6b347840..121a7007208b 100644 +index 5bd3c370e385..898e8564e3fe 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp -@@ -236,7 +236,7 @@ LogicalResult LoadNdOp::verify() { +@@ -237,7 +237,7 @@ LogicalResult LoadNdOp::verify() { emitWarning("Invalid transpose attr. It is ignored."); } diff --git a/include/imex/Utils/XeCommon.h b/include/imex/Utils/XeCommon.h index 23228b2ba..a7a551d07 100644 --- a/include/imex/Utils/XeCommon.h +++ b/include/imex/Utils/XeCommon.h @@ -415,10 +415,10 @@ template unsigned encodeCacheHint(OpType op) { } return cacheHint; } -class XeTypeConverter : public mlir::OneToNTypeConverter { +class XeTypeConverter : public mlir::TypeConverter { public: // friend class XeConversionPattern; - using mlir::OneToNTypeConverter::convertType; + using mlir::TypeConverter::convertType; XeTypeConverter(mlir::MLIRContext &context) { addConversion([&](xetile::TileType tileTy, diff --git a/lib/Conversion/DistToStandard/DistToStandard.cpp b/lib/Conversion/DistToStandard/DistToStandard.cpp index a9b0b417c..8e1b804ef 100644 --- a/lib/Conversion/DistToStandard/DistToStandard.cpp +++ b/lib/Conversion/DistToStandard/DistToStandard.cpp @@ -1709,8 +1709,7 @@ struct ConvertDistToStandardPass auto materializeArray = [&](::mlir::OpBuilder &builder, ::imex::ndarray::NDArrayType type, - ::mlir::ValueRange inputs, - ::mlir::Location loc) -> std::optional<::mlir::Value> { + ::mlir::ValueRange inputs, ::mlir::Location loc) -> ::mlir::Value { assert(inputs.size() == 1); auto input = inputs[0]; auto itype = input.getType(); diff --git a/lib/Conversion/NDArrayToLinalg/NDArrayToLinalg.cpp b/lib/Conversion/NDArrayToLinalg/NDArrayToLinalg.cpp index 620427c46..96a5dfea6 100644 --- a/lib/Conversion/NDArrayToLinalg/NDArrayToLinalg.cpp +++ b/lib/Conversion/NDArrayToLinalg/NDArrayToLinalg.cpp @@ -1281,10 +1281,9 @@ struct ConvertNDArrayToLinalgPass typeConverter.addConversion(convT2T); typeConverter.addConversion(convNDArray2RankedTensor); - auto materializeCast = - [](::mlir::OpBuilder &builder, ::mlir::Type type, - ::mlir::ValueRange inputs, - ::mlir::Location loc) -> std::optional<::mlir::Value> { + auto materializeCast = [](::mlir::OpBuilder &builder, ::mlir::Type type, + ::mlir::ValueRange inputs, + ::mlir::Location loc) -> ::mlir::Value { if (inputs.size() == 1) { auto input = inputs[0]; auto itype = 
input.getType(); diff --git a/lib/Conversion/XeTileToXeGPU/XeTileToXeGPUConversion.cpp b/lib/Conversion/XeTileToXeGPU/XeTileToXeGPUConversion.cpp index 31f405ae3..25b438430 100644 --- a/lib/Conversion/XeTileToXeGPU/XeTileToXeGPUConversion.cpp +++ b/lib/Conversion/XeTileToXeGPU/XeTileToXeGPUConversion.cpp @@ -89,23 +89,21 @@ XeOneToNTypeConverter::XeOneToNTypeConverter(mlir::MLIRContext &context) addConversion( [&](mlir::MemRefType type) -> std::optional { return type; }); - addArgumentMaterialization( - [&](mlir::OpBuilder &builder, mlir::Type resultType, - mlir::ValueRange inputs, - mlir::Location loc) -> std::optional { - return builder - .create(loc, resultType, inputs) - .getResult(0); - }); - - addSourceMaterialization( - [&](mlir::OpBuilder &builder, mlir::Type resultType, - mlir::ValueRange inputs, - mlir::Location loc) -> std::optional { - return builder - .create(loc, resultType, inputs) - .getResult(0); - }); + addArgumentMaterialization([&](mlir::OpBuilder &builder, + mlir::Type resultType, mlir::ValueRange inputs, + mlir::Location loc) -> mlir::Value { + return builder + .create(loc, resultType, inputs) + .getResult(0); + }); + + addSourceMaterialization([&](mlir::OpBuilder &builder, mlir::Type resultType, + mlir::ValueRange inputs, + mlir::Location loc) -> mlir::Value { + return builder + .create(loc, resultType, inputs) + .getResult(0); + }); } std::optional XeOneToNTypeConverter::convertTileType( diff --git a/lib/Dialect/XeTile/Transforms/Canonicalization.cpp b/lib/Dialect/XeTile/Transforms/Canonicalization.cpp index b41a97397..67086951b 100644 --- a/lib/Dialect/XeTile/Transforms/Canonicalization.cpp +++ b/lib/Dialect/XeTile/Transforms/Canonicalization.cpp @@ -396,7 +396,7 @@ struct XeTileCanonicalizationPass final mlir::ValueRange inputs, mlir::Location loc) { auto cast = builder.create(loc, type, inputs); - return std::optional(cast.getResult(0)); + return cast.getResult(0); }; typeConverter.addConversion([](mlir::Type type) { return type; }); typeConverter.addConversion([](imex::xetile::TileType tileTy) { diff --git a/lib/Transforms/CMakeLists.txt b/lib/Transforms/CMakeLists.txt index d6c1fb7f7..47d7e21b7 100644 --- a/lib/Transforms/CMakeLists.txt +++ b/lib/Transforms/CMakeLists.txt @@ -20,6 +20,7 @@ add_mlir_library(IMEXTransforms LINK_LIBS PUBLIC MLIRFuncDialect + MLIRCopyOpInterface MLIRGPUDialect MLIRPass MLIRSCFDialect diff --git a/lib/Transforms/OptimizeTranspose.cpp b/lib/Transforms/OptimizeTranspose.cpp index b639c16bc..3bc0052a2 100644 --- a/lib/Transforms/OptimizeTranspose.cpp +++ b/lib/Transforms/OptimizeTranspose.cpp @@ -969,7 +969,7 @@ struct OptimizeTransposePass final auto addNToOneCast = [](OpBuilder &builder, Type type, ValueRange inputs, Location loc) { auto cast = builder.create(loc, type, inputs); - return std::optional(cast.getResult(0)); + return cast.getResult(0); }; typeConverter.addSourceMaterialization(addNToOneCast); typeConverter.addArgumentMaterialization(addNToOneCast); diff --git a/test/SPIRV/CppEdsl.Convolution_BF16.mlir b/test/SPIRV/CppEdsl.Convolution_BF16.mlir index a08ed05cd..40ec7c250 100644 --- a/test/SPIRV/CppEdsl.Convolution_BF16.mlir +++ b/test/SPIRV/CppEdsl.Convolution_BF16.mlir @@ -114,7 +114,7 @@ spirv.module @__spv__init_kernel_0 Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %14, %arg0 : i16 %15 = spirv.IAdd %4, %arg4 : i64 spirv.Branch ^bb1(%15 : i64) @@ -154,7 +154,7 @@ spirv.module 
@__spv__init_kernel_1 Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %14, %arg0 : i16 %15 = spirv.IAdd %4, %arg4 : i64 spirv.Branch ^bb1(%15 : i64) @@ -194,7 +194,7 @@ spirv.module @__spv__copy_arg0_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg0[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %15 = spirv.Load "CrossWorkgroup" %14 : i16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst200704_i64_2 = spirv.Constant 200704 : i64 @@ -209,7 +209,7 @@ spirv.module @__spv__copy_arg0_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %24 = spirv.AccessChain %arg2[%23] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %24, %15 : i16 %25 = spirv.IAdd %4, %arg4 : i64 spirv.Branch ^bb1(%25 : i64) @@ -269,7 +269,7 @@ spirv.module @__spv__conv_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %25 = spirv.AccessChain %arg0[%24] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %26 = spirv.Load "CrossWorkgroup" %25 : i16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst12288_i64 = spirv.Constant 12288 : i64 @@ -284,7 +284,7 @@ spirv.module @__spv__conv_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %35 = spirv.AccessChain %arg2[%34] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %36 = spirv.Load "CrossWorkgroup" %35 : i16 %cst0_i64_4 = spirv.Constant 0 : i64 %cst200704_i64 = spirv.Constant 200704 : i64 @@ -299,7 +299,7 @@ spirv.module @__spv__conv_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %45 = spirv.AccessChain %arg3[%44] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %46 = spirv.Load "CrossWorkgroup" %45 : i16 %f32_26 = spirv.INTEL.ConvertBF16ToF %26 : i16 to f32 @@ -328,7 +328,7 @@ spirv.module @__spv__conv_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %57 = spirv.AccessChain %arg3[%56] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %57, %bf16_48 : i16 %58 = spirv.IAdd %13, %arg5 : i64 spirv.Branch ^bb1(%58 : i64) diff --git a/test/SPIRV/OpTest.ArgMax_BF16.mlir b/test/SPIRV/OpTest.ArgMax_BF16.mlir index 3e634bcfd..9152ee0e8 100644 --- a/test/SPIRV/OpTest.ArgMax_BF16.mlir +++ b/test/SPIRV/OpTest.ArgMax_BF16.mlir @@ -103,12 +103,12 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64, %6 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : i16 %fp32_17 = spirv.INTEL.ConvertBF16ToF %17 : i16 to f32 %cst0_i64_0 = spirv.Constant 0 : i64 - %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 + %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %19 = spirv.Load "CrossWorkgroup" %18 : i16 %fp32_19 = spirv.INTEL.ConvertBF16ToF %19 : i16 to f32 @@ -117,7 +117,7 @@ module @argmax attributes {gpu.container_module} { %bf16_21 = spirv.INTEL.ConvertFToBF16 %21 : f32 to i16 %cst0_i64_1 = spirv.Constant 0 : i64 - %22 = spirv.AccessChain %arg2[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64 + %22 = spirv.AccessChain %arg2[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %22, %bf16_21 : i16 %23 = spirv.IAdd %6, %arg4 : i64 spirv.Branch 
^bb1(%23 : i64) @@ -167,7 +167,7 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %12 = spirv.IMul %cst1_i64, %4 : i64 %13 = spirv.IAdd %11, %12 : i64 - %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %14, %arg0 : i32 %15 = spirv.IAdd %4, %arg4 : i64 spirv.Branch ^bb1(%15 : i64) @@ -212,12 +212,12 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64, %6 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : i16 %f32_17 = spirv.INTEL.ConvertBF16ToF %17 : i16 to f32 %cst0_i64_0 = spirv.Constant 0 : i64 - %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 + %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %19 = spirv.Load "CrossWorkgroup" %18 : i16 %f32_19 = spirv.INTEL.ConvertBF16ToF %19 : i16 to f32 @@ -234,17 +234,17 @@ module @argmax attributes {gpu.container_module} { %cst1_i64_5 = spirv.Constant 1 : i64 %26 = spirv.IMul %cst1_i64_5, %6 : i64 %27 = spirv.IAdd %25, %26 : i64 - %28 = spirv.AccessChain %arg3[%27] : !spirv.ptr, CrossWorkgroup>, i64 + %28 = spirv.AccessChain %arg3[%27] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %29 = spirv.Load "CrossWorkgroup" %28 : i32 %cst0_i64_6 = spirv.Constant 0 : i64 - %30 = spirv.AccessChain %arg4[%cst0_i64_6] : !spirv.ptr, CrossWorkgroup>, i64 + %30 = spirv.AccessChain %arg4[%cst0_i64_6] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %31 = spirv.Load "CrossWorkgroup" %30 : i32 %32 = spirv.FOrdEqual %f32_17, %f32_19 : f32 %33 = spirv.Select %32, %29, %arg5 : i1, i32 %34 = spirv.UGreaterThan %31, %33 : i32 %35 = spirv.Select %34, %31, %33 : i1, i32 %cst0_i64_7 = spirv.Constant 0 : i64 - %36 = spirv.AccessChain %arg4[%cst0_i64_7] : !spirv.ptr, CrossWorkgroup>, i64 + %36 = spirv.AccessChain %arg4[%cst0_i64_7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %36, %35 : i32 %37 = spirv.IAdd %6, %arg7 : i64 spirv.Branch ^bb1(%37 : i64) diff --git a/test/SPIRV/OpTest.Argmax_FLOAT32.mlir b/test/SPIRV/OpTest.Argmax_FLOAT32.mlir index 5555853d1..167895a94 100644 --- a/test/SPIRV/OpTest.Argmax_FLOAT32.mlir +++ b/test/SPIRV/OpTest.Argmax_FLOAT32.mlir @@ -87,15 +87,15 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64, %6 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : f32 %cst0_i64_0 = spirv.Constant 0 : i64 - %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 + %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %19 = spirv.Load "CrossWorkgroup" %18 : f32 %20 = spirv.FOrdGreaterThan %19, %17 : f32 %21 = spirv.Select %20, %19, %17 : i1, f32 %cst0_i64_1 = spirv.Constant 0 : i64 - %22 = spirv.AccessChain %arg2[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64 + %22 = spirv.AccessChain %arg2[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %22, %21 : f32 %23 = spirv.IAdd %6, %arg4 : i64 
spirv.Branch ^bb1(%23 : i64) @@ -160,7 +160,7 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %12 = spirv.IMul %cst1_i64, %4 : i64 %13 = spirv.IAdd %11, %12 : i64 - %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %14, %arg0 : i32 %15 = spirv.IAdd %4, %arg4 : i64 spirv.Branch ^bb1(%15 : i64) @@ -214,10 +214,10 @@ module @argmax attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64, %6 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg0[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : f32 %cst0_i64_0 = spirv.Constant 0 : i64 - %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 + %18 = spirv.AccessChain %arg2[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %19 = spirv.Load "CrossWorkgroup" %18 : f32 %cst0_i64_1 = spirv.Constant 0 : i64 %cst48_i64_2 = spirv.Constant 48 : i64 @@ -232,17 +232,17 @@ module @argmax attributes {gpu.container_module} { %cst1_i64_5 = spirv.Constant 1 : i64 %26 = spirv.IMul %cst1_i64_5, %6 : i64 %27 = spirv.IAdd %25, %26 : i64 - %28 = spirv.AccessChain %arg3[%27] : !spirv.ptr, CrossWorkgroup>, i64 + %28 = spirv.AccessChain %arg3[%27] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %29 = spirv.Load "CrossWorkgroup" %28 : i32 %cst0_i64_6 = spirv.Constant 0 : i64 - %30 = spirv.AccessChain %arg4[%cst0_i64_6] : !spirv.ptr, CrossWorkgroup>, i64 + %30 = spirv.AccessChain %arg4[%cst0_i64_6] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %31 = spirv.Load "CrossWorkgroup" %30 : i32 %32 = spirv.FOrdEqual %17, %19 : f32 %33 = spirv.Select %32, %29, %arg5 : i1, i32 %34 = spirv.UGreaterThan %31, %33 : i32 %35 = spirv.Select %34, %31, %33 : i1, i32 %cst0_i64_7 = spirv.Constant 0 : i64 - %36 = spirv.AccessChain %arg4[%cst0_i64_7] : !spirv.ptr, CrossWorkgroup>, i64 + %36 = spirv.AccessChain %arg4[%cst0_i64_7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %36, %35 : i32 %37 = spirv.IAdd %6, %arg7 : i64 spirv.Branch ^bb1(%37 : i64) diff --git a/test/SPIRV/OpTest.BroadcastNonNumpy_BF16.mlir b/test/SPIRV/OpTest.BroadcastNonNumpy_BF16.mlir index ac7c92939..105d9594b 100644 --- a/test/SPIRV/OpTest.BroadcastNonNumpy_BF16.mlir +++ b/test/SPIRV/OpTest.BroadcastNonNumpy_BF16.mlir @@ -60,7 +60,7 @@ spirv.module @__spv__broadcast_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %6 = spirv.AccessChain %arg0[%5] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %7 = spirv.Load "CrossWorkgroup" %6 : i16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst4_i64 = spirv.Constant 4 : i64 @@ -69,7 +69,7 @@ spirv.module @__spv__broadcast_kernel Physical64 OpenCL requires #spirv.vce, CrossWorkgroup>, i64 + %12 = spirv.AccessChain %arg1[%11] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %12, %7 : i16 spirv.Return } diff --git a/test/SPIRV/OpTest.BroadcastNonNumpy_FLOAT32.mlir b/test/SPIRV/OpTest.BroadcastNonNumpy_FLOAT32.mlir index 8e8490994..e13019e1f 100644 --- a/test/SPIRV/OpTest.BroadcastNonNumpy_FLOAT32.mlir +++ b/test/SPIRV/OpTest.BroadcastNonNumpy_FLOAT32.mlir @@ -41,7 +41,7 @@ module @broadcast_non_numpy attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64, %3 : i64 %7 = spirv.IAdd %5, %6 : 
i64 - %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %8, %arg0 : f32 spirv.Return } @@ -68,7 +68,7 @@ module @broadcast_non_numpy attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %4 = spirv.IMul %cst1_i64, %1 : i64 %5 = spirv.IAdd %cst0_i64, %4 : i64 - %6 = spirv.AccessChain %arg0[%5] : !spirv.ptr, CrossWorkgroup>, i64 + %6 = spirv.AccessChain %arg0[%5] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %7 = spirv.Load "CrossWorkgroup" %6 : f32 %cst0_i64_1 = spirv.Constant 0 : i64 %cst4_i64 = spirv.Constant 4 : i64 @@ -77,7 +77,7 @@ module @broadcast_non_numpy attributes {gpu.container_module} { %cst1_i64_2 = spirv.Constant 1 : i64 %10 = spirv.IMul %cst1_i64_2, %3 : i64 %11 = spirv.IAdd %9, %10 : i64 - %12 = spirv.AccessChain %arg1[%11] : !spirv.ptr, CrossWorkgroup>, i64 + %12 = spirv.AccessChain %arg1[%11] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %12, %7 : f32 spirv.Return } diff --git a/test/SPIRV/OpTest.Conv2D_FLOAT32.mlir b/test/SPIRV/OpTest.Conv2D_FLOAT32.mlir index 2879b769e..743433f4a 100644 --- a/test/SPIRV/OpTest.Conv2D_FLOAT32.mlir +++ b/test/SPIRV/OpTest.Conv2D_FLOAT32.mlir @@ -97,7 +97,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %17 = spirv.IMul %cst1_i64, %7 : i64 %18 = spirv.IAdd %16, %17 : i64 - %19 = spirv.AccessChain %arg0[%18] : !spirv.ptr, CrossWorkgroup>, i64 + %19 = spirv.AccessChain %arg0[%18] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %20 = spirv.Load "CrossWorkgroup" %19 : f32 %cst0_i64_1 = spirv.Constant 0 : i64 %cst451584_i64_2 = spirv.Constant 451584 : i64 @@ -115,7 +115,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %29 = spirv.IMul %cst1_i64_6, %7 : i64 %30 = spirv.IAdd %28, %29 : i64 - %31 = spirv.AccessChain %arg2[%30] : !spirv.ptr, CrossWorkgroup>, i64 + %31 = spirv.AccessChain %arg2[%30] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %31, %20 : f32 %32 = spirv.IAdd %7, %arg4 : i64 spirv.Branch ^bb1(%32 : i64) @@ -181,7 +181,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %17 = spirv.IMul %cst1_i64, %7 : i64 %18 = spirv.IAdd %16, %17 : i64 - %19 = spirv.AccessChain %arg1[%18] : !spirv.ptr, CrossWorkgroup>, i64 + %19 = spirv.AccessChain %arg1[%18] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %19, %arg0 : f32 %20 = spirv.IAdd %7, %arg4 : i64 spirv.Branch ^bb1(%20 : i64) @@ -246,7 +246,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %17 = spirv.IMul %cst1_i64, %7 : i64 %18 = spirv.IAdd %16, %17 : i64 - %19 = spirv.AccessChain %arg1[%18] : !spirv.ptr, CrossWorkgroup>, i64 + %19 = spirv.AccessChain %arg1[%18] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %19, %arg0 : f32 %20 = spirv.IAdd %7, %arg4 : i64 spirv.Branch ^bb1(%20 : i64) @@ -337,7 +337,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %32 = spirv.IMul %cst1_i64, %16 : i64 %33 = spirv.IAdd %31, %32 : i64 - %34 = spirv.AccessChain %arg0[%33] : !spirv.ptr, CrossWorkgroup>, i64 + %34 = spirv.AccessChain %arg0[%33] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %35 = spirv.Load "CrossWorkgroup" %34 : f32 %cst0_i64_2 = spirv.Constant 0 : i64 %cst864_i64 = 
spirv.Constant 864 : i64 @@ -355,7 +355,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %44 = spirv.IMul %cst1_i64_3, %7 : i64 %45 = spirv.IAdd %43, %44 : i64 - %46 = spirv.AccessChain %arg2[%45] : !spirv.ptr, CrossWorkgroup>, i64 + %46 = spirv.AccessChain %arg2[%45] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %47 = spirv.Load "CrossWorkgroup" %46 : f32 %cst0_i64_4 = spirv.Constant 0 : i64 %cst1204224_i64 = spirv.Constant 1204224 : i64 @@ -373,7 +373,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64_7 = spirv.Constant 1 : i64 %56 = spirv.IMul %cst1_i64_7, %7 : i64 %57 = spirv.IAdd %55, %56 : i64 - %58 = spirv.AccessChain %arg3[%57] : !spirv.ptr, CrossWorkgroup>, i64 + %58 = spirv.AccessChain %arg3[%57] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %59 = spirv.Load "CrossWorkgroup" %58 : f32 %60 = spirv.FMul %35, %47 : f32 %61 = spirv.FAdd %59, %60 : f32 @@ -393,7 +393,7 @@ module @complex_conv_2d attributes {gpu.container_module} { %cst1_i64_13 = spirv.Constant 1 : i64 %70 = spirv.IMul %cst1_i64_13, %7 : i64 %71 = spirv.IAdd %69, %70 : i64 - %72 = spirv.AccessChain %arg3[%71] : !spirv.ptr, CrossWorkgroup>, i64 + %72 = spirv.AccessChain %arg3[%71] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %72, %61 : f32 %73 = spirv.IAdd %16, %arg5 : i64 spirv.Branch ^bb1(%73 : i64) diff --git a/test/SPIRV/OpTest.EltwiseAdd_BF16.mlir b/test/SPIRV/OpTest.EltwiseAdd_BF16.mlir index 035c1d1b8..6533bc06f 100644 --- a/test/SPIRV/OpTest.EltwiseAdd_BF16.mlir +++ b/test/SPIRV/OpTest.EltwiseAdd_BF16.mlir @@ -90,11 +90,11 @@ func.func @main() attributes {llvm.emit_c_interface} { %3 = spirv.CompositeExtract %2[1 : i32] : vector<3xi64> %4 = spirv.IMul %1, %cst20_i64 : i64 %5 = spirv.IAdd %4, %3 : i64 - %6 = spirv.AccessChain %arg0[%5] : !spirv.ptr, CrossWorkgroup>, i64 + %6 = spirv.AccessChain %arg0[%5] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %7 = spirv.Load "CrossWorkgroup" %6 : i16 %8 = spirv.IMul %1, %cst20_i64 : i64 %9 = spirv.IAdd %8, %3 : i64 - %10 = spirv.AccessChain %arg1[%9] : !spirv.ptr, CrossWorkgroup>, i64 + %10 = spirv.AccessChain %arg1[%9] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %11 = spirv.Load "CrossWorkgroup" %10 : i16 // %12 = spirv.IAdd %7, %11 : i16 // *************************************** // @@ -109,7 +109,7 @@ func.func @main() attributes {llvm.emit_c_interface} { %13 = spirv.IMul %1, %cst20_i64 : i64 %14 = spirv.IAdd %13, %3 : i64 - %15 = spirv.AccessChain %arg2[%14] : !spirv.ptr, CrossWorkgroup>, i64 + %15 = spirv.AccessChain %arg2[%14] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %15, %12 : i16 spirv.Return } diff --git a/test/SPIRV/OpTest.EltwiseAdd_FLOAT32.mlir b/test/SPIRV/OpTest.EltwiseAdd_FLOAT32.mlir index dd2f97998..cc863867e 100644 --- a/test/SPIRV/OpTest.EltwiseAdd_FLOAT32.mlir +++ b/test/SPIRV/OpTest.EltwiseAdd_FLOAT32.mlir @@ -64,7 +64,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64, %3 : i64 %7 = spirv.IAdd %5, %6 : i64 - %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %9 = spirv.Load "CrossWorkgroup" %8 : f32 %cst0_i64_1 = spirv.Constant 0 : i64 %cst20_i64_2 = spirv.Constant 20 : i64 @@ -73,7 +73,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %12 = spirv.IMul %cst1_i64_3, %3 : i64 %13 
= spirv.IAdd %11, %12 : i64 - %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %15 = spirv.Load "CrossWorkgroup" %14 : f32 %16 = spirv.FAdd %9, %15 : f32 %cst0_i64_4 = spirv.Constant 0 : i64 @@ -83,7 +83,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %19 = spirv.IMul %cst1_i64_6, %3 : i64 %20 = spirv.IAdd %18, %19 : i64 - %21 = spirv.AccessChain %arg2[%20] : !spirv.ptr, CrossWorkgroup>, i64 + %21 = spirv.AccessChain %arg2[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %21, %16 : f32 spirv.Return } diff --git a/test/SPIRV/OpTest.EltwiseAdd_FLOAT32_explicit_memory_copy.mlir b/test/SPIRV/OpTest.EltwiseAdd_FLOAT32_explicit_memory_copy.mlir index 4de11be5b..ba736d678 100644 --- a/test/SPIRV/OpTest.EltwiseAdd_FLOAT32_explicit_memory_copy.mlir +++ b/test/SPIRV/OpTest.EltwiseAdd_FLOAT32_explicit_memory_copy.mlir @@ -72,7 +72,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64, %3 : i64 %7 = spirv.IAdd %5, %6 : i64 - %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %9 = spirv.Load "CrossWorkgroup" %8 : f32 %cst0_i64_1 = spirv.Constant 0 : i64 %cst20_i64_2 = spirv.Constant 20 : i64 @@ -81,7 +81,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %12 = spirv.IMul %cst1_i64_3, %3 : i64 %13 = spirv.IAdd %11, %12 : i64 - %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %15 = spirv.Load "CrossWorkgroup" %14 : f32 %16 = spirv.FAdd %9, %15 : f32 %cst0_i64_4 = spirv.Constant 0 : i64 @@ -91,7 +91,7 @@ module @eltwise_add attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %19 = spirv.IMul %cst1_i64_6, %3 : i64 %20 = spirv.IAdd %18, %19 : i64 - %21 = spirv.AccessChain %arg2[%20] : !spirv.ptr, CrossWorkgroup>, i64 + %21 = spirv.AccessChain %arg2[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %21, %16 : f32 spirv.Return } diff --git a/test/SPIRV/OpTest.ExplicitPadding_FLOAT32.mlir b/test/SPIRV/OpTest.ExplicitPadding_FLOAT32.mlir index 3f01dc794..9069845c3 100644 --- a/test/SPIRV/OpTest.ExplicitPadding_FLOAT32.mlir +++ b/test/SPIRV/OpTest.ExplicitPadding_FLOAT32.mlir @@ -58,7 +58,7 @@ module @explicit_padding attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64, %3 : i64 %7 = spirv.IAdd %5, %6 : i64 - %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %8, %arg0 : f32 spirv.Return } @@ -90,7 +90,7 @@ module @explicit_padding attributes {gpu.container_module} { %cst1_i64_1 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64_1, %3 : i64 %7 = spirv.IAdd %5, %6 : i64 - %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %9 = spirv.Load "CrossWorkgroup" %8 : f32 %10 = spirv.IAdd %1, %cst2_i64 : i64 %11 = spirv.IAdd %3, %cst1_i64 : i64 @@ -101,7 +101,7 @@ module @explicit_padding attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64_3, %11 
: i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : f32 %18 = spirv.FAdd %9, %17 : f32 %cst0_i64_4 = spirv.Constant 0 : i64 @@ -111,7 +111,7 @@ module @explicit_padding attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %21 = spirv.IMul %cst1_i64_6, %11 : i64 %22 = spirv.IAdd %20, %21 : i64 - %23 = spirv.AccessChain %arg1[%22] : !spirv.ptr, CrossWorkgroup>, i64 + %23 = spirv.AccessChain %arg1[%22] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %23, %18 : f32 spirv.Return } diff --git a/test/SPIRV/OpTest.GEMM_BF16.mlir b/test/SPIRV/OpTest.GEMM_BF16.mlir index 54eaf982a..c94ced5db 100644 --- a/test/SPIRV/OpTest.GEMM_BF16.mlir +++ b/test/SPIRV/OpTest.GEMM_BF16.mlir @@ -87,7 +87,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %8 = spirv.IMul %cst1_i64, %4 : i64 %9 = spirv.IAdd %7, %8 : i64 - %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 + %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %11 = spirv.Load "CrossWorkgroup" %10 : i16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst3_i64_2 = spirv.Constant 3 : i64 @@ -96,7 +96,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64_3, %3 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : i16 %cst0_i64_4 = spirv.Constant 0 : i64 %cst3_i64_5 = spirv.Constant 3 : i64 @@ -105,7 +105,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %20 = spirv.IMul %cst1_i64_6, %3 : i64 %21 = spirv.IAdd %19, %20 : i64 - %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 + %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %23 = spirv.Load "CrossWorkgroup" %22 : i16 %231 = spirv.INTEL.ConvertBF16ToF %23 : i16 to f32 @@ -124,7 +124,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_9 = spirv.Constant 1 : i64 %28 = spirv.IMul %cst1_i64_9, %3 : i64 %29 = spirv.IAdd %27, %28 : i64 - %30 = spirv.AccessChain %arg2[%29] : !spirv.ptr, CrossWorkgroup>, i64 + %30 = spirv.AccessChain %arg2[%29] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %30, %251 : i16 %31 = spirv.IAdd %4, %arg5 : i64 spirv.Branch ^bb1(%31 : i64) diff --git a/test/SPIRV/OpTest.GEMM_BF16_ACC_F32.mlir b/test/SPIRV/OpTest.GEMM_BF16_ACC_F32.mlir index 1347ff0ea..f66bafcef 100644 --- a/test/SPIRV/OpTest.GEMM_BF16_ACC_F32.mlir +++ b/test/SPIRV/OpTest.GEMM_BF16_ACC_F32.mlir @@ -80,7 +80,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %8 = spirv.IMul %cst1_i64, %4 : i64 %9 = spirv.IAdd %7, %8 : i64 - %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 + %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %11 = spirv.Load "CrossWorkgroup" %10 : i16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst3_i64_2 = spirv.Constant 3 : i64 @@ -89,7 +89,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64_3, %3 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg1[%15] : 
!spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : i16 %cst0_i64_4 = spirv.Constant 0 : i64 %cst3_i64_5 = spirv.Constant 3 : i64 @@ -98,7 +98,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %20 = spirv.IMul %cst1_i64_6, %3 : i64 %21 = spirv.IAdd %19, %20 : i64 - %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 + %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %23 = spirv.Load "CrossWorkgroup" %22 : f32 %24 = spirv.INTEL.ConvertBF16ToF %11 : i16 to f32 %25 = spirv.INTEL.ConvertBF16ToF %17 : i16 to f32 @@ -111,7 +111,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_9 = spirv.Constant 1 : i64 %30 = spirv.IMul %cst1_i64_9, %3 : i64 %31 = spirv.IAdd %29, %30 : i64 - %32 = spirv.AccessChain %arg2[%31] : !spirv.ptr, CrossWorkgroup>, i64 + %32 = spirv.AccessChain %arg2[%31] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %32, %27 : f32 %33 = spirv.IAdd %4, %arg5 : i64 spirv.Branch ^bb1(%33 : i64) diff --git a/test/SPIRV/OpTest.GEMM_F16_ACC_F32.mlir b/test/SPIRV/OpTest.GEMM_F16_ACC_F32.mlir index 4d9675596..e282496b5 100644 --- a/test/SPIRV/OpTest.GEMM_F16_ACC_F32.mlir +++ b/test/SPIRV/OpTest.GEMM_F16_ACC_F32.mlir @@ -73,7 +73,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %8 = spirv.IMul %cst1_i64, %4 : i64 %9 = spirv.IAdd %7, %8 : i64 - %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 + %10 = spirv.AccessChain %arg0[%9] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %11 = spirv.Load "CrossWorkgroup" %10 : f16 %cst0_i64_1 = spirv.Constant 0 : i64 %cst3_i64_2 = spirv.Constant 3 : i64 @@ -82,7 +82,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %14 = spirv.IMul %cst1_i64_3, %3 : i64 %15 = spirv.IAdd %13, %14 : i64 - %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 + %16 = spirv.AccessChain %arg1[%15] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %17 = spirv.Load "CrossWorkgroup" %16 : f16 %cst0_i64_4 = spirv.Constant 0 : i64 %cst3_i64_5 = spirv.Constant 3 : i64 @@ -91,7 +91,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_6 = spirv.Constant 1 : i64 %20 = spirv.IMul %cst1_i64_6, %3 : i64 %21 = spirv.IAdd %19, %20 : i64 - %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 + %22 = spirv.AccessChain %arg2[%21] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %23 = spirv.Load "CrossWorkgroup" %22 : f32 %24 = spirv.FConvert %11 : f16 to f32 %25 = spirv.FConvert %17 : f16 to f32 @@ -104,7 +104,7 @@ module @gemm attributes {gpu.container_module} { %cst1_i64_9 = spirv.Constant 1 : i64 %30 = spirv.IMul %cst1_i64_9, %3 : i64 %31 = spirv.IAdd %29, %30 : i64 - %32 = spirv.AccessChain %arg2[%31] : !spirv.ptr, CrossWorkgroup>, i64 + %32 = spirv.AccessChain %arg2[%31] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %32, %27 : f32 %33 = spirv.IAdd %4, %arg5 : i64 spirv.Branch ^bb1(%33 : i64) diff --git a/test/SPIRV/OpTest.MaxPool1D_INT64.mlir b/test/SPIRV/OpTest.MaxPool1D_INT64.mlir index c84cf00ea..2895f25e3 100644 --- a/test/SPIRV/OpTest.MaxPool1D_INT64.mlir +++ b/test/SPIRV/OpTest.MaxPool1D_INT64.mlir @@ -48,7 +48,7 @@ module @max_pool_1d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %0 = spirv.IMul %cst1_i64, %arg2 : i64 %1 = spirv.IAdd %cst0_i64, 
%0 : i64 - %2 = spirv.AccessChain %arg1[%1] : !spirv.ptr, CrossWorkgroup>, i64 + %2 = spirv.AccessChain %arg1[%1] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %2, %arg0 : i64 spirv.Return } @@ -72,13 +72,13 @@ module @max_pool_1d attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %2 = spirv.IMul %cst1_i64, %0 : i64 %3 = spirv.IAdd %cst0_i64, %2 : i64 - %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 + %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %5 = spirv.Load "CrossWorkgroup" %4 : i64 %cst0_i64_0 = spirv.Constant 0 : i64 %cst1_i64_1 = spirv.Constant 1 : i64 %6 = spirv.IMul %cst1_i64_1, %arg2 : i64 %7 = spirv.IAdd %cst0_i64_0, %6 : i64 - %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 + %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %9 = spirv.Load "CrossWorkgroup" %8 : i64 %10 = spirv.UGreaterThan %9, %5 : i64 %11 = spirv.Select %10, %9, %5 : i1, i64 @@ -86,7 +86,7 @@ module @max_pool_1d attributes {gpu.container_module} { %cst1_i64_3 = spirv.Constant 1 : i64 %12 = spirv.IMul %cst1_i64_3, %arg2 : i64 %13 = spirv.IAdd %cst0_i64_2, %12 : i64 - %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 + %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %14, %11 : i64 %15 = spirv.IAdd %0, %arg4 : i64 spirv.Branch ^bb1(%15 : i64) diff --git a/test/SPIRV/OpTest.Quantize_FLOAT32.mlir b/test/SPIRV/OpTest.Quantize_FLOAT32.mlir index 1dfb018bc..1b17f1b71 100644 --- a/test/SPIRV/OpTest.Quantize_FLOAT32.mlir +++ b/test/SPIRV/OpTest.Quantize_FLOAT32.mlir @@ -34,14 +34,14 @@ module @quantize attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %2 = spirv.IMul %cst1_i64, %1 : i64 %3 = spirv.IAdd %cst0_i64, %2 : i64 - %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 + %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %5 = spirv.Load "CrossWorkgroup" %4 : f32 %6 = spirv.FMul %5, %arg1 : f32 %cst0_i64_0 = spirv.Constant 0 : i64 %cst1_i64_1 = spirv.Constant 1 : i64 %7 = spirv.IMul %cst1_i64_1, %1 : i64 %8 = spirv.IAdd %cst0_i64_0, %7 : i64 - %9 = spirv.AccessChain %arg2[%8] : !spirv.ptr, CrossWorkgroup>, i64 + %9 = spirv.AccessChain %arg2[%8] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %9, %6 : f32 spirv.Return } @@ -66,14 +66,14 @@ module @quantize attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 : i64 %2 = spirv.IMul %cst1_i64, %1 : i64 %3 = spirv.IAdd %cst0_i64, %2 : i64 - %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 + %4 = spirv.AccessChain %arg0[%3] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr %5 = spirv.Load "CrossWorkgroup" %4 : f32 %6 = spirv.ConvertFToS %5 : f32 to i32 %cst0_i64_0 = spirv.Constant 0 : i64 %cst1_i64_1 = spirv.Constant 1 : i64 %7 = spirv.IMul %cst1_i64_1, %1 : i64 %8 = spirv.IAdd %cst0_i64_0, %7 : i64 - %9 = spirv.AccessChain %arg1[%8] : !spirv.ptr, CrossWorkgroup>, i64 + %9 = spirv.AccessChain %arg1[%8] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr spirv.Store "CrossWorkgroup" %9, %6 : i32 spirv.Return } diff --git a/test/SPIRV/OpTest.Relu_FLOAT32.mlir b/test/SPIRV/OpTest.Relu_FLOAT32.mlir index 942f67dc0..7a41eeef8 100644 --- a/test/SPIRV/OpTest.Relu_FLOAT32.mlir +++ b/test/SPIRV/OpTest.Relu_FLOAT32.mlir @@ -73,7 +73,7 @@ module @relu attributes {gpu.container_module} { %cst1_i64 = spirv.Constant 1 
: i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %10 = spirv.FOrdLessThan %9, %arg1 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
@@ -83,7 +83,7 @@ module @relu attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %13 = spirv.IMul %cst1_i64_3, %3 : i64
 %14 = spirv.IAdd %12, %13 : i64
-    %15 = spirv.AccessChain %arg2[%14] : !spirv.ptr, CrossWorkgroup>, i64
+    %15 = spirv.AccessChain %arg2[%14] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %cst0_i8 = spirv.Constant 0 : i8
 %cst1_i8 = spirv.Constant 1 : i8
 %16 = spirv.Select %10, %cst1_i8, %cst0_i8 : i1, i8
@@ -118,7 +118,7 @@ module @relu attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : i8
 %cst1_i8 = spirv.Constant 1 : i8
 %10 = spirv.IEqual %9, %cst1_i8 : i8
@@ -129,7 +129,7 @@ module @relu attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %13 = spirv.IMul %cst1_i64_3, %3 : i64
 %14 = spirv.IAdd %12, %13 : i64
-    %15 = spirv.AccessChain %arg1[%14] : !spirv.ptr, CrossWorkgroup>, i64
+    %15 = spirv.AccessChain %arg1[%14] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %16 = spirv.Load "CrossWorkgroup" %15 : f32
 %17 = spirv.Select %10, %arg2, %16 : i1, f32
 %cst0_i64_4 = spirv.Constant 0 : i64
@@ -139,7 +139,7 @@ module @relu attributes {gpu.container_module} {
 %cst1_i64_6 = spirv.Constant 1 : i64
 %20 = spirv.IMul %cst1_i64_6, %3 : i64
 %21 = spirv.IAdd %19, %20 : i64
-    %22 = spirv.AccessChain %arg3[%21] : !spirv.ptr, CrossWorkgroup>, i64
+    %22 = spirv.AccessChain %arg3[%21] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %22, %17 : f32
 spirv.Return
 }
diff --git a/test/SPIRV/OpTest.SlmDynamic.mlir b/test/SPIRV/OpTest.SlmDynamic.mlir
index a96336d3e..eedb06b88 100644
--- a/test/SPIRV/OpTest.SlmDynamic.mlir
+++ b/test/SPIRV/OpTest.SlmDynamic.mlir
@@ -123,7 +123,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %19 = spirv.IMul %cst1_i64, %9 : i64
 %20 = spirv.IAdd %18, %19 : i64
-    %21 = spirv.AccessChain %arg0[%20] : !spirv.ptr, CrossWorkgroup>, i64
+    %21 = spirv.AccessChain %arg0[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %22 = spirv.Load "CrossWorkgroup" %21 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
 %cst128_i64_2 = spirv.Constant 128 : i64
@@ -132,7 +132,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %25 = spirv.IMul %cst1_i64_3, %16 : i64
 %26 = spirv.IAdd %24, %25 : i64
-    %27 = spirv.AccessChain %arg0[%26] : !spirv.ptr, CrossWorkgroup>, i64
+    %27 = spirv.AccessChain %arg0[%26] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %28 = spirv.Load "CrossWorkgroup" %27 : f32
 %cst0_i64_4 = spirv.Constant 0 : i64
 %cst128_i64_5 = spirv.Constant 128 : i64
@@ -141,7 +141,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_6 = spirv.Constant 1 : i64
 %31 = spirv.IMul %cst1_i64_6, %9 : i64
 %32 = spirv.IAdd %30, %31 : i64
-    %33 = spirv.AccessChain %arg2[%32] : !spirv.ptr, Workgroup>, i64
+    %33 = spirv.AccessChain %arg2[%32] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 spirv.Store "Workgroup" %33, %22 : f32
 %cst0_i64_7 = spirv.Constant 0 : i64
 %cst128_i64_8 = spirv.Constant 128 : i64
@@ -150,7 +150,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_9 = spirv.Constant 1 : i64
 %36 = spirv.IMul %cst1_i64_9, %16 : i64
 %37 = spirv.IAdd %35, %36 : i64
-    %38 = spirv.AccessChain %arg2[%37] : !spirv.ptr, Workgroup>, i64
+    %38 = spirv.AccessChain %arg2[%37] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 spirv.Store "Workgroup" %38, %28 : f32
 spirv.ControlBarrier , ,
 %cst0_i64_10 = spirv.Constant 0 : i64
@@ -160,7 +160,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_12 = spirv.Constant 1 : i64
 %41 = spirv.IMul %cst1_i64_12, %9 : i64
 %42 = spirv.IAdd %40, %41 : i64
-    %43 = spirv.AccessChain %arg2[%42] : !spirv.ptr, Workgroup>, i64
+    %43 = spirv.AccessChain %arg2[%42] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 %44 = spirv.Load "Workgroup" %43 : f32
 %cst0_i64_13 = spirv.Constant 0 : i64
 %cst128_i64_14 = spirv.Constant 128 : i64
@@ -169,7 +169,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_15 = spirv.Constant 1 : i64
 %47 = spirv.IMul %cst1_i64_15, %16 : i64
 %48 = spirv.IAdd %46, %47 : i64
-    %49 = spirv.AccessChain %arg2[%48] : !spirv.ptr, Workgroup>, i64
+    %49 = spirv.AccessChain %arg2[%48] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 %50 = spirv.Load "Workgroup" %49 : f32
 %cst0_i64_16 = spirv.Constant 0 : i64
 %cst128_i64_17 = spirv.Constant 128 : i64
@@ -178,7 +178,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_18 = spirv.Constant 1 : i64
 %53 = spirv.IMul %cst1_i64_18, %9 : i64
 %54 = spirv.IAdd %52, %53 : i64
-    %55 = spirv.AccessChain %arg1[%54] : !spirv.ptr, CrossWorkgroup>, i64
+    %55 = spirv.AccessChain %arg1[%54] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %55, %44 : f32
 %cst0_i64_19 = spirv.Constant 0 : i64
 %cst128_i64_20 = spirv.Constant 128 : i64
@@ -187,7 +187,7 @@ module @slm attributes {gpu.container_module} {
 %cst1_i64_21 = spirv.Constant 1 : i64
 %58 = spirv.IMul %cst1_i64_21, %16 : i64
 %59 = spirv.IAdd %57, %58 : i64
-    %60 = spirv.AccessChain %arg1[%59] : !spirv.ptr, CrossWorkgroup>, i64
+    %60 = spirv.AccessChain %arg1[%59] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %60, %50 : f32
 spirv.Return
 }
diff --git a/test/SPIRV/OpTest.Softmax_FLOAT32.mlir b/test/SPIRV/OpTest.Softmax_FLOAT32.mlir
index bf8a8133a..cf53efe5f 100644
--- a/test/SPIRV/OpTest.Softmax_FLOAT32.mlir
+++ b/test/SPIRV/OpTest.Softmax_FLOAT32.mlir
@@ -99,7 +99,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_0 = spirv.Constant 1 : i64
 %4 = spirv.IMul %cst1_i64_0, %arg2 : i64
 %5 = spirv.IAdd %3, %4 : i64
-    %6 = spirv.AccessChain %arg1[%5] : !spirv.ptr, CrossWorkgroup>, i64
+    %6 = spirv.AccessChain %arg1[%5] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %6, %arg0 : f32
 spirv.Return
 }
@@ -131,7 +131,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %2 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %cst0_i64_0 = spirv.Constant 0 : i64
 %cst1_i64_1 = spirv.Constant 1 : i64
@@ -140,7 +140,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_2 = spirv.Constant 1 : i64
 %12 = spirv.IMul %cst1_i64_2, %arg2 : i64
 %13 = spirv.IAdd %11, %12 : i64
-    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %15 = spirv.Load "CrossWorkgroup" %14 : f32
 %16 = spirv.FOrdGreaterThan %15, %9 : f32
 %17 = spirv.Select %16, %15, %9 : i1, f32
@@ -151,7 +151,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_5 = spirv.Constant 1 : i64
 %20 = spirv.IMul %cst1_i64_5, %arg2 : i64
 %21 = spirv.IAdd %19, %20 : i64
-    %22 = spirv.AccessChain %arg1[%21] : !spirv.ptr, CrossWorkgroup>, i64
+    %22 = spirv.AccessChain %arg1[%21] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %22, %17 : f32
 %23 = spirv.IAdd %2, %arg4 : i64
 spirv.Branch ^bb1(%23 : i64)
@@ -191,7 +191,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
 %cst1_i64_2 = spirv.Constant 1 : i64
@@ -200,7 +200,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %12 = spirv.IMul %cst1_i64_3, %arg2 : i64
 %13 = spirv.IAdd %11, %12 : i64
-    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %15 = spirv.Load "CrossWorkgroup" %14 : f32
 %16 = spirv.FSub %9, %15 : f32
 %cst0_i64_4 = spirv.Constant 0 : i64
@@ -210,7 +210,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_6 = spirv.Constant 1 : i64
 %19 = spirv.IMul %cst1_i64_6, %3 : i64
 %20 = spirv.IAdd %18, %19 : i64
-    %21 = spirv.AccessChain %arg3[%20] : !spirv.ptr, CrossWorkgroup>, i64
+    %21 = spirv.AccessChain %arg3[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %21, %16 : f32
 spirv.Return
 }
@@ -243,7 +243,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %10 = spirv.CL.exp %9 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
@@ -253,7 +253,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %13 = spirv.IMul %cst1_i64_3, %3 : i64
 %14 = spirv.IAdd %12, %13 : i64
-    %15 = spirv.AccessChain %arg1[%14] : !spirv.ptr, CrossWorkgroup>, i64
+    %15 = spirv.AccessChain %arg1[%14] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %15, %10 : f32
 spirv.Return
 }
@@ -282,7 +282,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_0 = spirv.Constant 1 : i64
 %4 = spirv.IMul %cst1_i64_0, %arg2 : i64
 %5 = spirv.IAdd %3, %4 : i64
-    %6 = spirv.AccessChain %arg1[%5] : !spirv.ptr, CrossWorkgroup>, i64
+    %6 = spirv.AccessChain %arg1[%5] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %6, %arg0 : f32
 spirv.Return
 }
@@ -314,7 +314,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %2 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %cst0_i64_0 = spirv.Constant 0 : i64
 %cst1_i64_1 = spirv.Constant 1 : i64
@@ -323,7 +323,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_2 = spirv.Constant 1 : i64
 %12 = spirv.IMul %cst1_i64_2, %arg2 : i64
 %13 = spirv.IAdd %11, %12 : i64
-    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %15 = spirv.Load "CrossWorkgroup" %14 : f32
 %16 = spirv.FAdd %15, %9 : f32
 %cst0_i64_3 = spirv.Constant 0 : i64
@@ -333,7 +333,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_5 = spirv.Constant 1 : i64
 %19 = spirv.IMul %cst1_i64_5, %arg2 : i64
 %20 = spirv.IAdd %18, %19 : i64
-    %21 = spirv.AccessChain %arg1[%20] : !spirv.ptr, CrossWorkgroup>, i64
+    %21 = spirv.AccessChain %arg1[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %21, %16 : f32
 %22 = spirv.IAdd %2, %arg4 : i64
 spirv.Branch ^bb1(%22 : i64)
@@ -372,7 +372,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
 %cst1_i64_2 = spirv.Constant 1 : i64
@@ -381,7 +381,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %12 = spirv.IMul %cst1_i64_3, %arg2 : i64
 %13 = spirv.IAdd %11, %12 : i64
-    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %15 = spirv.Load "CrossWorkgroup" %14 : f32
 %16 = spirv.FDiv %9, %15 : f32
 %cst0_i64_4 = spirv.Constant 0 : i64
@@ -391,7 +391,7 @@ module @softmax attributes {gpu.container_module} {
 %cst1_i64_6 = spirv.Constant 1 : i64
 %19 = spirv.IMul %cst1_i64_6, %3 : i64
 %20 = spirv.IAdd %18, %19 : i64
-    %21 = spirv.AccessChain %arg3[%20] : !spirv.ptr, CrossWorkgroup>, i64
+    %21 = spirv.AccessChain %arg3[%20] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %21, %16 : f32
 spirv.Return
 }
diff --git a/test/SPIRV/OpTest.Sum_FLOAT32.mlir b/test/SPIRV/OpTest.Sum_FLOAT32.mlir
index 8b11d4e89..58285afb6 100644
--- a/test/SPIRV/OpTest.Sum_FLOAT32.mlir
+++ b/test/SPIRV/OpTest.Sum_FLOAT32.mlir
@@ -61,14 +61,14 @@ module @sum attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %7 = spirv.IMul %cst1_i64, %3 : i64
 %8 = spirv.IAdd %6, %7 : i64
-    %9 = spirv.AccessChain %arg0[%8] : !spirv.ptr, CrossWorkgroup>, i64
+    %9 = spirv.AccessChain %arg0[%8] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %10 = spirv.Load "CrossWorkgroup" %9 : f32
 %cst0_i64_0 = spirv.Constant 0 : i64
-    %11 = spirv.AccessChain %arg1[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64
+    %11 = spirv.AccessChain %arg1[%cst0_i64_0] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %12 = spirv.Load "CrossWorkgroup" %11 : f32
 %13 = spirv.FAdd %12, %10 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
-    %14 = spirv.AccessChain %arg1[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%cst0_i64_1] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %14, %13 : f32
 %15 = spirv.IAdd %3, %arg4 : i64
 spirv.Branch ^bb1(%15 : i64)
diff --git a/test/SPIRV/OpTest.Transpose_FLOAT32.mlir b/test/SPIRV/OpTest.Transpose_FLOAT32.mlir
index 73ef05c1e..efcf221eb 100644
--- a/test/SPIRV/OpTest.Transpose_FLOAT32.mlir
+++ b/test/SPIRV/OpTest.Transpose_FLOAT32.mlir
@@ -75,7 +75,7 @@ module @transpose attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg1[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %8, %arg0 : f32
 spirv.Return
 }
@@ -105,7 +105,7 @@ module @transpose attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %1 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %cst0_i64_1 = spirv.Constant 0 : i64
 %cst10_i64 = spirv.Constant 10 : i64
@@ -114,7 +114,7 @@ module @transpose attributes {gpu.container_module} {
 %cst1_i64_2 = spirv.Constant 1 : i64
 %12 = spirv.IMul %cst1_i64_2, %3 : i64
 %13 = spirv.IAdd %11, %12 : i64
-    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64
+    %14 = spirv.AccessChain %arg1[%13] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %14, %9 : f32
 spirv.Return
 }
diff --git a/test/SPIRV/relu.slm.static.8x32.mlir b/test/SPIRV/relu.slm.static.8x32.mlir
index e73eda5fd..1a7bfab71 100644
--- a/test/SPIRV/relu.slm.static.8x32.mlir
+++ b/test/SPIRV/relu.slm.static.8x32.mlir
@@ -45,7 +45,7 @@ module @test attributes {gpu.container_module} {
 %cst1_i64 = spirv.Constant 1 : i64
 %6 = spirv.IMul %cst1_i64, %3 : i64
 %7 = spirv.IAdd %5, %6 : i64
-    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64
+    %8 = spirv.AccessChain %arg0[%7] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 %9 = spirv.Load "CrossWorkgroup" %8 : f32
 %10 = spirv.FUnordGreaterThan %9, %arg1 : f32
 %11 = spirv.Select %10, %9, %arg1 : i1, f32
@@ -57,7 +57,7 @@ module @test attributes {gpu.container_module} {
 %cst1_i64_3 = spirv.Constant 1 : i64
 %14 = spirv.IMul %cst1_i64_3, %3 : i64
 %15 = spirv.IAdd %13, %14 : i64
-    %16 = spirv.AccessChain %__workgroup_mem__1_addr[%15] : !spirv.ptr, Workgroup>, i64
+    %16 = spirv.AccessChain %__workgroup_mem__1_addr[%15] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 spirv.Store "Workgroup" %16, %11 : f32
 spirv.ControlBarrier , ,
 %cst0_i64_4 = spirv.Constant 0 : i64
@@ -67,7 +67,7 @@ module @test attributes {gpu.container_module} {
 %cst1_i64_6 = spirv.Constant 1 : i64
 %19 = spirv.IMul %cst1_i64_6, %3 : i64
 %20 = spirv.IAdd %18, %19 : i64
-    %21 = spirv.AccessChain %__workgroup_mem__1_addr[%20] : !spirv.ptr, Workgroup>, i64
+    %21 = spirv.AccessChain %__workgroup_mem__1_addr[%20] : !spirv.ptr, Workgroup>, i64 -> !spirv.ptr
 %22 = spirv.Load "Workgroup" %21 : f32
 %cst0_i64_7 = spirv.Constant 0 : i64
 %cst32_i64_8 = spirv.Constant 32 : i64
@@ -76,7 +76,7 @@ module @test attributes {gpu.container_module} {
 %cst1_i64_9 = spirv.Constant 1 : i64
 %25 = spirv.IMul %cst1_i64_9, %3 : i64
 %26 = spirv.IAdd %24, %25 : i64
-    %27 = spirv.AccessChain %arg2[%26] : !spirv.ptr, CrossWorkgroup>, i64
+    %27 = spirv.AccessChain %arg2[%26] : !spirv.ptr, CrossWorkgroup>, i64 -> !spirv.ptr
 spirv.Store "CrossWorkgroup" %27, %22 : f32
 spirv.Return
 }