From 8664666823b3eb8d96fde58f79d71d36bd7f9115 Mon Sep 17 00:00:00 2001 From: Eli Friedman Date: Thu, 1 Aug 2024 16:18:20 -0700 Subject: [PATCH 01/29] Fix codegen of consteval functions returning an empty class, and related issues (#93115) Fix codegen of consteval functions returning an empty class, and related issues If a class is empty, don't store it to memory: the store might overwrite useful data. Similarly, if a class has tail padding that might overlap other fields, don't store the tail padding to memory. The problem here turned out a bit more general than I initially thought: basically all uses of EmitAggregateStore were broken. Call lowering had a method that did mostly the right thing, though: CreateCoercedStore. Adapt CreateCoercedStore so it always does the conservatively right thing, and use it for both calls and ConstantExpr. Also, along the way, fix the "overlap" bit in AggValueSlot: the bit was set incorrectly for empty classes in some cases. Fixes #93040. (cherry picked from commit 1762e01cca0186f1862db561cfd9019164b8c654) --- clang/lib/CodeGen/CGCall.cpp | 146 ++++++++---------- clang/lib/CodeGen/CGExprAgg.cpp | 23 +-- clang/lib/CodeGen/CodeGenFunction.h | 7 +- clang/test/CodeGen/arm-mve-intrinsics/vld24.c | 43 ++++-- clang/test/CodeGen/arm-vfp16-arguments2.cpp | 10 +- .../amdgpu-kernel-arg-pointer-type.cu | 11 +- clang/test/CodeGenCUDA/builtins-amdgcn.cu | 125 +++++++-------- .../test/CodeGenCUDA/builtins-spirv-amdgcn.cu | 123 +++++++-------- .../CodeGenCXX/address-space-cast-coerce.cpp | 6 +- clang/test/CodeGenCXX/cxx2a-consteval.cpp | 24 ++- clang/test/CodeGenCXX/trivial_abi.cpp | 20 +++ clang/test/CodeGenHIP/dpp-const-fold.hip | 8 +- .../spirv-amdgcn-dpp-const-fold.hip | 8 +- .../CodeGenOpenCL/addr-space-struct-arg.cl | 11 +- .../amdgpu-abi-struct-arg-byref.cl | 42 +++-- 15 files changed, 320 insertions(+), 287 deletions(-) diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 6e69e84a2344..d7ebffa8c5e4 100644 --- 
a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1336,75 +1336,50 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, return CGF.Builder.CreateLoad(Tmp); } -// Function to store a first-class aggregate into memory. We prefer to -// store the elements rather than the aggregate to be more friendly to -// fast-isel. -// FIXME: Do we need to recurse here? -void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, - bool DestIsVolatile) { - // Prefer scalar stores to first-class aggregate stores. - if (llvm::StructType *STy = dyn_cast(Val->getType())) { - for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { - Address EltPtr = Builder.CreateStructGEP(Dest, i); - llvm::Value *Elt = Builder.CreateExtractValue(Val, i); - Builder.CreateStore(Elt, EltPtr, DestIsVolatile); - } - } else { - Builder.CreateStore(Val, Dest, DestIsVolatile); - } -} - -/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, -/// where the source and destination may have different types. The -/// destination is known to be aligned to \arg DstAlign bytes. -/// -/// This safely handles the case when the src type is larger than the -/// destination type; the upper bits of the src will be lost. 
-static void CreateCoercedStore(llvm::Value *Src, - Address Dst, - bool DstIsVolatile, - CodeGenFunction &CGF) { - llvm::Type *SrcTy = Src->getType(); - llvm::Type *DstTy = Dst.getElementType(); - if (SrcTy == DstTy) { - CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); - return; - } - - llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); - - if (llvm::StructType *DstSTy = dyn_cast(DstTy)) { - Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, - SrcSize.getFixedValue(), CGF); - DstTy = Dst.getElementType(); - } - - llvm::PointerType *SrcPtrTy = llvm::dyn_cast(SrcTy); - llvm::PointerType *DstPtrTy = llvm::dyn_cast(DstTy); - if (SrcPtrTy && DstPtrTy && - SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { - Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy); - CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); +void CodeGenFunction::CreateCoercedStore(llvm::Value *Src, Address Dst, + llvm::TypeSize DstSize, + bool DstIsVolatile) { + if (!DstSize) return; - } - // If the source and destination are integer or pointer types, just do an - // extension or truncation to the desired type. - if ((isa(SrcTy) || isa(SrcTy)) && - (isa(DstTy) || isa(DstTy))) { - Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); - CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); - return; + llvm::Type *SrcTy = Src->getType(); + llvm::TypeSize SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); + + // GEP into structs to try to make types match. + // FIXME: This isn't really that useful with opaque types, but it impacts a + // lot of regression tests. + if (SrcTy != Dst.getElementType()) { + if (llvm::StructType *DstSTy = + dyn_cast(Dst.getElementType())) { + assert(!SrcSize.isScalable()); + Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, + SrcSize.getFixedValue(), *this); + } } - llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); - - // If store is legal, just bitcast the src pointer. 
- if (isa(SrcTy) || - isa(DstTy) || - SrcSize.getFixedValue() <= DstSize.getFixedValue()) { - Dst = Dst.withElementType(SrcTy); - CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); + if (SrcSize.isScalable() || SrcSize <= DstSize) { + if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() && + SrcSize == CGM.getDataLayout().getTypeAllocSize(Dst.getElementType())) { + // If the value is supposed to be a pointer, convert it before storing it. + Src = CoerceIntOrPtrToIntOrPtr(Src, Dst.getElementType(), *this); + Builder.CreateStore(Src, Dst, DstIsVolatile); + } else if (llvm::StructType *STy = + dyn_cast(Src->getType())) { + // Prefer scalar stores to first-class aggregate stores. + Dst = Dst.withElementType(SrcTy); + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { + Address EltPtr = Builder.CreateStructGEP(Dst, i); + llvm::Value *Elt = Builder.CreateExtractValue(Src, i); + Builder.CreateStore(Elt, EltPtr, DstIsVolatile); + } + } else { + Builder.CreateStore(Src, Dst.withElementType(SrcTy), DstIsVolatile); + } + } else if (SrcTy->isIntegerTy()) { + // If the source is a simple integer, coerce it directly. + llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8); + Src = CoerceIntOrPtrToIntOrPtr(Src, DstIntTy, *this); + Builder.CreateStore(Src, Dst.withElementType(DstIntTy), DstIsVolatile); } else { // Otherwise do coercion through memory. This is stupid, but // simple. @@ -1416,12 +1391,12 @@ static void CreateCoercedStore(llvm::Value *Src, // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. 
RawAddress Tmp = - CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); - CGF.Builder.CreateStore(Src, Tmp); - CGF.Builder.CreateMemCpy( - Dst.emitRawPointer(CGF), Dst.getAlignment().getAsAlign(), - Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), - llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue())); + CreateTempAllocaForCoercion(*this, SrcTy, Dst.getAlignment()); + Builder.CreateStore(Src, Tmp); + Builder.CreateMemCpy(Dst.emitRawPointer(*this), + Dst.getAlignment().getAsAlign(), Tmp.getPointer(), + Tmp.getAlignment().getAsAlign(), + Builder.CreateTypeSize(IntPtrTy, DstSize)); } } @@ -3309,7 +3284,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, assert(NumIRArgs == 1); auto AI = Fn->getArg(FirstIRArg); AI->setName(Arg->getName() + ".coerce"); - CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); + CreateCoercedStore( + AI, Ptr, + llvm::TypeSize::getFixed( + getContext().getTypeSizeInChars(Ty).getQuantity() - + ArgI.getDirectOffset()), + /*DstIsVolatile=*/false); } // Match to what EmitParmDecl is expecting for this type. @@ -5939,17 +5919,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); return RValue::getComplex(std::make_pair(Real, Imag)); } - case TEK_Aggregate: { - Address DestPtr = ReturnValue.getAddress(); - bool DestIsVolatile = ReturnValue.isVolatile(); - - if (!DestPtr.isValid()) { - DestPtr = CreateMemTemp(RetTy, "agg.tmp"); - DestIsVolatile = false; - } - EmitAggregateStore(CI, DestPtr, DestIsVolatile); - return RValue::getAggregate(DestPtr); - } + case TEK_Aggregate: + break; case TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. // This can happen due to trivial type mismatches. 
@@ -5959,7 +5930,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, return RValue::get(V); } } - llvm_unreachable("bad evaluation kind"); } // If coercing a fixed vector from a scalable vector for ABI @@ -5981,10 +5951,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Address DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); + uint64_t DestSize = + getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity(); if (!DestPtr.isValid()) { DestPtr = CreateMemTemp(RetTy, "coerce"); DestIsVolatile = false; + DestSize = getContext().getTypeSizeInChars(RetTy).getQuantity(); } // An empty record can overlap other data (if declared with @@ -5993,7 +5966,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, if (!isEmptyRecord(getContext(), RetTy, true)) { // If the value is offset in memory, apply the offset now. Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); - CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); + CreateCoercedStore( + CI, StorePtr, + llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()), + DestIsVolatile); } return convertTempToRValue(DestPtr, RetTy, SourceLocation()); diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index c3c10e73ff05..d9f44f4be617 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -131,15 +131,12 @@ class AggExprEmitter : public StmtVisitor { EnsureDest(E->getType()); if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) { - Address StoreDest = Dest.getAddress(); - // The emitted value is guaranteed to have the same size as the - // destination but can have a different type. Just do a bitcast in this - // case to avoid incorrect GEPs. 
- if (Result->getType() != StoreDest.getType()) - StoreDest = StoreDest.withElementType(Result->getType()); - - CGF.EmitAggregateStore(Result, StoreDest, - E->getType().isVolatileQualified()); + CGF.CreateCoercedStore( + Result, Dest.getAddress(), + llvm::TypeSize::getFixed( + Dest.getPreferredSize(CGF.getContext(), E->getType()) + .getQuantity()), + E->getType().isVolatileQualified()); return; } return Visit(E->getSubExpr()); @@ -2050,6 +2047,10 @@ CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { if (!FD->hasAttr() || !FD->getType()->isRecordType()) return AggValueSlot::DoesNotOverlap; + // Empty fields can overlap earlier fields. + if (FD->getType()->getAsCXXRecordDecl()->isEmpty()) + return AggValueSlot::MayOverlap; + // If the field lies entirely within the enclosing class's nvsize, its tail // padding cannot overlap any already-initialized object. (The only subobjects // with greater addresses that might already be initialized are vbases.) @@ -2072,6 +2073,10 @@ AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit( if (IsVirtual) return AggValueSlot::MayOverlap; + // Empty bases can overlap earlier bases. + if (BaseRD->isEmpty()) + return AggValueSlot::MayOverlap; + // If the base class is laid out entirely within the nvsize of the derived // class, its tail padding cannot yet be initialized, so we can issue // stores at the full width of the base class. diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index ba7b565d9755..60e6841e1b3d 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4838,9 +4838,10 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind); - /// Build all the stores needed to initialize an aggregate at Dest with the - /// value Val. 
- void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile); + /// Create a store to \arg DstPtr from \arg Src, truncating the stored value + /// to at most \arg DstSize bytes. + void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, + bool DstIsVolatile); /// EmitExtendGCLifetime - Given a pointer to an Objective-C object, /// make sure it survives garbage collection until this point. diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vld24.c b/clang/test/CodeGen/arm-mve-intrinsics/vld24.c index 03c870e28154..15619bef5373 100644 --- a/clang/test/CodeGen/arm-mve-intrinsics/vld24.c +++ b/clang/test/CodeGen/arm-mve-intrinsics/vld24.c @@ -48,10 +48,13 @@ uint8x16x4_t test_vld4q_u8(const uint8_t *addr) // CHECK-LABEL: @test_vst2q_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_0_EXTRACT:%.*]] = extractvalue [[STRUCT_UINT32X4X2_T:%.*]] [[VALUE_COERCE:%.*]], 0, 0 -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_1_EXTRACT:%.*]] = extractvalue [[STRUCT_UINT32X4X2_T]] [[VALUE_COERCE]], 0, 1 -// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v4i32(ptr [[ADDR:%.*]], <4 x i32> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <4 x i32> [[VALUE_COERCE_FCA_0_1_EXTRACT]], i32 0) -// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v4i32(ptr [[ADDR]], <4 x i32> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <4 x i32> [[VALUE_COERCE_FCA_0_1_EXTRACT]], i32 1) +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_UINT32X4X2_T:%.*]] [[VALUE_COERCE:%.*]], 0 +// CHECK-NEXT: [[DOTFCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[TMP0]], 0 +// CHECK-NEXT: [[DOTFCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[TMP0]], 1 +// CHECK-NEXT: [[DOTFCA_0_0_INSERT:%.*]] = insertvalue [[STRUCT_UINT32X4X2_T]] poison, <4 x i32> [[DOTFCA_0_EXTRACT]], 0, 0 +// CHECK-NEXT: [[DOTFCA_0_1_INSERT:%.*]] = insertvalue [[STRUCT_UINT32X4X2_T]] [[DOTFCA_0_0_INSERT]], <4 x i32> [[DOTFCA_1_EXTRACT]], 0, 1 +// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v4i32(ptr [[ADDR:%.*]], <4 x i32> 
[[DOTFCA_0_EXTRACT]], <4 x i32> [[DOTFCA_1_EXTRACT]], i32 0) +// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v4i32(ptr [[ADDR]], <4 x i32> [[DOTFCA_0_EXTRACT]], <4 x i32> [[DOTFCA_1_EXTRACT]], i32 1) // CHECK-NEXT: ret void // void test_vst2q_u32(uint32_t *addr, uint32x4x2_t value) @@ -65,14 +68,19 @@ void test_vst2q_u32(uint32_t *addr, uint32x4x2_t value) // CHECK-LABEL: @test_vst4q_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_0_EXTRACT:%.*]] = extractvalue [[STRUCT_INT8X16X4_T:%.*]] [[VALUE_COERCE:%.*]], 0, 0 -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_1_EXTRACT:%.*]] = extractvalue [[STRUCT_INT8X16X4_T]] [[VALUE_COERCE]], 0, 1 -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_2_EXTRACT:%.*]] = extractvalue [[STRUCT_INT8X16X4_T]] [[VALUE_COERCE]], 0, 2 -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_3_EXTRACT:%.*]] = extractvalue [[STRUCT_INT8X16X4_T]] [[VALUE_COERCE]], 0, 3 -// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR:%.*]], <16 x i8> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_1_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_2_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_3_EXTRACT]], i32 0) -// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_1_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_2_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_3_EXTRACT]], i32 1) -// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_1_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_2_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_3_EXTRACT]], i32 2) -// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_1_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_2_EXTRACT]], <16 x i8> [[VALUE_COERCE_FCA_0_3_EXTRACT]], i32 3) +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_INT8X16X4_T:%.*]] [[VALUE_COERCE:%.*]], 0 +// 
CHECK-NEXT: [[DOTFCA_0_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[TMP0]], 0 +// CHECK-NEXT: [[DOTFCA_1_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[TMP0]], 1 +// CHECK-NEXT: [[DOTFCA_2_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[TMP0]], 2 +// CHECK-NEXT: [[DOTFCA_3_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[TMP0]], 3 +// CHECK-NEXT: [[DOTFCA_0_0_INSERT:%.*]] = insertvalue [[STRUCT_INT8X16X4_T]] poison, <16 x i8> [[DOTFCA_0_EXTRACT]], 0, 0 +// CHECK-NEXT: [[DOTFCA_0_1_INSERT:%.*]] = insertvalue [[STRUCT_INT8X16X4_T]] [[DOTFCA_0_0_INSERT]], <16 x i8> [[DOTFCA_1_EXTRACT]], 0, 1 +// CHECK-NEXT: [[DOTFCA_0_2_INSERT:%.*]] = insertvalue [[STRUCT_INT8X16X4_T]] [[DOTFCA_0_1_INSERT]], <16 x i8> [[DOTFCA_2_EXTRACT]], 0, 2 +// CHECK-NEXT: [[DOTFCA_0_3_INSERT:%.*]] = insertvalue [[STRUCT_INT8X16X4_T]] [[DOTFCA_0_2_INSERT]], <16 x i8> [[DOTFCA_3_EXTRACT]], 0, 3 +// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR:%.*]], <16 x i8> [[DOTFCA_0_EXTRACT]], <16 x i8> [[DOTFCA_1_EXTRACT]], <16 x i8> [[DOTFCA_2_EXTRACT]], <16 x i8> [[DOTFCA_3_EXTRACT]], i32 0) +// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[DOTFCA_0_EXTRACT]], <16 x i8> [[DOTFCA_1_EXTRACT]], <16 x i8> [[DOTFCA_2_EXTRACT]], <16 x i8> [[DOTFCA_3_EXTRACT]], i32 1) +// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[DOTFCA_0_EXTRACT]], <16 x i8> [[DOTFCA_1_EXTRACT]], <16 x i8> [[DOTFCA_2_EXTRACT]], <16 x i8> [[DOTFCA_3_EXTRACT]], i32 2) +// CHECK-NEXT: call void @llvm.arm.mve.vst4q.p0.v16i8(ptr [[ADDR]], <16 x i8> [[DOTFCA_0_EXTRACT]], <16 x i8> [[DOTFCA_1_EXTRACT]], <16 x i8> [[DOTFCA_2_EXTRACT]], <16 x i8> [[DOTFCA_3_EXTRACT]], i32 3) // CHECK-NEXT: ret void // void test_vst4q_s8(int8_t *addr, int8x16x4_t value) @@ -86,10 +94,13 @@ void test_vst4q_s8(int8_t *addr, int8x16x4_t value) // CHECK-LABEL: @test_vst2q_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_0_EXTRACT:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T:%.*]] 
[[VALUE_COERCE:%.*]], 0, 0 -// CHECK-NEXT: [[VALUE_COERCE_FCA_0_1_EXTRACT:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T]] [[VALUE_COERCE]], 0, 1 -// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v8f16(ptr [[ADDR:%.*]], <8 x half> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <8 x half> [[VALUE_COERCE_FCA_0_1_EXTRACT]], i32 0) -// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v8f16(ptr [[ADDR]], <8 x half> [[VALUE_COERCE_FCA_0_0_EXTRACT]], <8 x half> [[VALUE_COERCE_FCA_0_1_EXTRACT]], i32 1) +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue [[STRUCT_FLOAT16X8X2_T:%.*]] [[VALUE_COERCE:%.*]], 0 +// CHECK-NEXT: [[DOTFCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x half>] [[TMP0]], 0 +// CHECK-NEXT: [[DOTFCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x half>] [[TMP0]], 1 +// CHECK-NEXT: [[DOTFCA_0_0_INSERT:%.*]] = insertvalue [[STRUCT_FLOAT16X8X2_T]] poison, <8 x half> [[DOTFCA_0_EXTRACT]], 0, 0 +// CHECK-NEXT: [[DOTFCA_0_1_INSERT:%.*]] = insertvalue [[STRUCT_FLOAT16X8X2_T]] [[DOTFCA_0_0_INSERT]], <8 x half> [[DOTFCA_1_EXTRACT]], 0, 1 +// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v8f16(ptr [[ADDR:%.*]], <8 x half> [[DOTFCA_0_EXTRACT]], <8 x half> [[DOTFCA_1_EXTRACT]], i32 0) +// CHECK-NEXT: call void @llvm.arm.mve.vst2q.p0.v8f16(ptr [[ADDR]], <8 x half> [[DOTFCA_0_EXTRACT]], <8 x half> [[DOTFCA_1_EXTRACT]], i32 1) // CHECK-NEXT: ret void // void test_vst2q_f16(float16_t *addr, float16x8x2_t value) diff --git a/clang/test/CodeGen/arm-vfp16-arguments2.cpp b/clang/test/CodeGen/arm-vfp16-arguments2.cpp index 6221e85e856b..b7c6852c47b7 100644 --- a/clang/test/CodeGen/arm-vfp16-arguments2.cpp +++ b/clang/test/CodeGen/arm-vfp16-arguments2.cpp @@ -44,20 +44,20 @@ struct S1 f1(struct S1 s1) { return s1; } // CHECK-SOFT: define{{.*}} void @_Z2f22S2(ptr dead_on_unwind noalias nocapture writable writeonly sret(%struct.S2) align 8 %agg.result, [4 x i32] %s2.coerce) // CHECK-HARD: define{{.*}} arm_aapcs_vfpcc [2 x <2 x i32>] @_Z2f22S2([2 x <2 x i32>] returned %s2.coerce) -// CHECK-FULL: define{{.*}} 
arm_aapcs_vfpcc %struct.S2 @_Z2f22S2(%struct.S2 returned %s2.coerce) +// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S2 @_Z2f22S2(%struct.S2 %s2.coerce) struct S2 f2(struct S2 s2) { return s2; } // CHECK-SOFT: define{{.*}} void @_Z2f32S3(ptr dead_on_unwind noalias nocapture writable writeonly sret(%struct.S3) align 8 %agg.result, [2 x i64] %s3.coerce) // CHECK-HARD: define{{.*}} arm_aapcs_vfpcc [2 x <2 x i32>] @_Z2f32S3([2 x <2 x i32>] returned %s3.coerce) -// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S3 @_Z2f32S3(%struct.S3 returned %s3.coerce) +// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S3 @_Z2f32S3(%struct.S3 %s3.coerce) struct S3 f3(struct S3 s3) { return s3; } // CHECK-SOFT: define{{.*}} void @_Z2f42S4(ptr dead_on_unwind noalias nocapture writable writeonly sret(%struct.S4) align 8 %agg.result, [2 x i64] %s4.coerce) // CHECK-HARD: define{{.*}} arm_aapcs_vfpcc [2 x <2 x i32>] @_Z2f42S4([2 x <2 x i32>] returned %s4.coerce) -// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S4 @_Z2f42S4(%struct.S4 returned %s4.coerce) +// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S4 @_Z2f42S4(%struct.S4 %s4.coerce) struct S4 f4(struct S4 s4) { return s4; } // CHECK-SOFT: define{{.*}} void @_Z2f52S5(ptr dead_on_unwind noalias nocapture writable writeonly sret(%struct.S5) align 8 %agg.result, [2 x i64] %s5.coerce) -// CHECK-HARD: define{{.*}} arm_aapcs_vfpcc %struct.S5 @_Z2f52S5(%struct.S5 returned %s5.coerce) -// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S5 @_Z2f52S5(%struct.S5 returned %s5.coerce) +// CHECK-HARD: define{{.*}} arm_aapcs_vfpcc %struct.S5 @_Z2f52S5(%struct.S5 %s5.coerce) +// CHECK-FULL: define{{.*}} arm_aapcs_vfpcc %struct.S5 @_Z2f52S5(%struct.S5 %s5.coerce) struct S5 f5(struct S5 s5) { return s5; } diff --git a/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu b/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu index a5135ab01f0f..70c86cbb8c3d 100644 --- a/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu 
+++ b/clang/test/CodeGenCUDA/amdgpu-kernel-arg-pointer-type.cu @@ -16,9 +16,8 @@ // HOST: define{{.*}} void @_Z22__device_stub__kernel1Pi(ptr noundef %x) // COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel1Pi(ptr addrspace(1){{.*}} %x.coerce) -// CHECK: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr // CHECK-NOT: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr -// OPT: [[VAL:%.*]] = load i32, ptr addrspace(1) %x.coerce, align 4, !amdgpu.noclobber ![[MD:[0-9]+]] +// OPT: [[VAL:%.*]] = load i32, ptr addrspace(1) %x.coerce, align 4{{$}} // OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1 // OPT: store i32 [[INC]], ptr addrspace(1) %x.coerce, align 4 // OPT: ret void @@ -28,9 +27,8 @@ __global__ void kernel1(int *x) { // HOST: define{{.*}} void @_Z22__device_stub__kernel2Ri(ptr noundef nonnull align 4 dereferenceable(4) %x) // COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel2Ri(ptr addrspace(1){{.*}} nonnull align 4 dereferenceable(4) %x.coerce) -// CHECK: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr // CHECK-NOT: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr -// OPT: [[VAL:%.*]] = load i32, ptr addrspace(1) %x.coerce, align 4, !amdgpu.noclobber ![[MD]] +// OPT: [[VAL:%.*]] = load i32, ptr addrspace(1) %x.coerce, align 4{{$}} // OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1 // OPT: store i32 [[INC]], ptr addrspace(1) %x.coerce, align 4 // OPT: ret void @@ -67,7 +65,7 @@ struct S { // OPT: [[R1:%.*]] = getelementptr inbounds i8, ptr addrspace(4) %0, i64 8 // OPT: [[P1:%.*]] = load ptr, ptr addrspace(4) [[R1]], align 8 // OPT: [[G1:%.*]] ={{.*}} addrspacecast ptr [[P1]] to ptr addrspace(1) -// OPT: [[V0:%.*]] = load i32, ptr addrspace(1) [[G0]], align 4, !amdgpu.noclobber ![[MD]] +// OPT: [[V0:%.*]] = load i32, ptr addrspace(1) [[G0]], align 4, !amdgpu.noclobber ![[MD:[0-9]+]] // OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1 // OPT: store i32 [[INC]], ptr addrspace(1) [[G0]], align 4 // OPT: [[V1:%.*]] = load float, ptr addrspace(1) [[G1]], 
align 4 @@ -126,9 +124,8 @@ struct SS { }; // HOST: define{{.*}} void @_Z22__device_stub__kernel82SS(ptr %a.coerce) // COMMON-LABEL: define{{.*}} amdgpu_kernel void @_Z7kernel82SS(ptr addrspace(1){{.*}} %a.coerce) -// CHECK: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr // CHECK-NOT: ={{.*}} addrspacecast ptr addrspace(1) %{{.*}} to ptr -// OPT: [[VAL:%.*]] = load float, ptr addrspace(1) %a.coerce, align 4, !amdgpu.noclobber ![[MD]] +// OPT: [[VAL:%.*]] = load float, ptr addrspace(1) %a.coerce, align 4{{$}} // OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00 // OPT: store float [[INC]], ptr addrspace(1) %a.coerce, align 4 // OPT: ret void diff --git a/clang/test/CodeGenCUDA/builtins-amdgcn.cu b/clang/test/CodeGenCUDA/builtins-amdgcn.cu index 2e88afac813f..4bf23e529c7a 100644 --- a/clang/test/CodeGenCUDA/builtins-amdgcn.cu +++ b/clang/test/CodeGenCUDA/builtins-amdgcn.cu @@ -17,17 +17,16 @@ // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr // CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DISPATCH_PTR]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr -// CHECK-NEXT: store ptr [[TMP2]], ptr [[DISPATCH_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DISPATCH_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK-NEXT: 
[[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr +// CHECK-NEXT: store ptr [[TMP1]], ptr [[DISPATCH_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DISPATCH_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 // CHECK-NEXT: ret void // __global__ void use_dispatch_ptr(int* out) { @@ -43,17 +42,16 @@ __global__ void use_dispatch_ptr(int* out) { // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr // CHECK-NEXT: [[QUEUE_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[QUEUE_PTR]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr() -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr -// CHECK-NEXT: store ptr [[TMP2]], ptr [[QUEUE_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[QUEUE_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call ptr addrspace(4) 
@llvm.amdgcn.queue.ptr() +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr +// CHECK-NEXT: store ptr [[TMP1]], ptr [[QUEUE_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[QUEUE_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 // CHECK-NEXT: ret void // __global__ void use_queue_ptr(int* out) { @@ -69,17 +67,16 @@ __global__ void use_queue_ptr(int* out) { // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr // CHECK-NEXT: [[IMPLICITARG_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[IMPLICITARG_PTR]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr -// CHECK-NEXT: store ptr [[TMP2]], ptr [[IMPLICITARG_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[IMPLICITARG_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr +// CHECK-NEXT: store ptr [[TMP1]], ptr [[IMPLICITARG_PTR_ASCAST]], align 8 +// CHECK-NEXT: 
[[TMP2:%.*]] = load ptr, ptr [[IMPLICITARG_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4 // CHECK-NEXT: ret void // __global__ void use_implicitarg_ptr(int* out) { @@ -134,16 +131,15 @@ __global__ void test_ds_fadd(float src) { // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED_ADDR]] to ptr // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[SHARED_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[SHARED_COERCE:%.*]], ptr [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load ptr, ptr [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store ptr [[SHARED1]], ptr [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3) -// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP2]], float [[TMP3]] monotonic, align 4 -// CHECK-NEXT: store volatile float [[TMP4]], ptr [[X_ASCAST]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[TMP0]] to ptr addrspace(3) +// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP1]], float [[TMP2]] monotonic, align 4 +// CHECK-NEXT: store volatile float [[TMP3]], ptr [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ 
void test_ds_fmin(float src, float *shared) { @@ -184,17 +180,16 @@ __global__ void endpgm() { // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr // CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr // CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[B_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35) -// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i64 [[TMP3]], ptr [[TMP4]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP0]], i64 [[TMP1]], i32 35) +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i64 [[TMP2]], ptr [[TMP3]], align 8 // CHECK-NEXT: ret void // __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b) @@ -210,13 +205,12 @@ __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, un // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[OUT]] to ptr // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime() -// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.amdgcn.s.memtime() +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP1]], align 8 // CHECK-NEXT: ret void // __global__ void test_s_memtime(unsigned long long* out) @@ -237,18 +231,17 @@ __device__ void func(float *x); // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED_ADDR]] to ptr // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[SHARED_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[SHARED_COERCE:%.*]], ptr [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load ptr, ptr [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store ptr [[SHARED1]], ptr [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3) -// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4 -// CHECK-NEXT: 
[[TMP4:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP2]], float [[TMP3]] monotonic, align 4 -// CHECK-NEXT: store volatile float [[TMP4]], ptr [[X_ASCAST]], align 4 -// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: call void @_Z4funcPf(ptr noundef [[TMP5]]) #[[ATTR7:[0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[TMP0]] to ptr addrspace(3) +// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP1]], float [[TMP2]] monotonic, align 4 +// CHECK-NEXT: store volatile float [[TMP3]], ptr [[X_ASCAST]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: call void @_Z4funcPf(ptr noundef [[TMP4]]) #[[ATTR7:[0-9]+]] // CHECK-NEXT: ret void // __global__ void test_ds_fmin_func(float src, float *__restrict shared) { @@ -264,14 +257,13 @@ __global__ void test_ds_fmin_func(float src, float *__restrict shared) { // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RET]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[X_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[X_COERCE:%.*]], ptr [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load ptr, ptr [[X_ASCAST]], align 8 // CHECK-NEXT: store ptr [[X1]], ptr [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[TMP1]]) -// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP2]] to i8 -// CHECK-NEXT: store i8 [[FROMBOOL]], ptr [[RET_ASCAST]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, 
ptr [[X_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[TMP0]]) +// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP1]] to i8 +// CHECK-NEXT: store i8 [[STOREDV]], ptr [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_shared(float *x){ @@ -286,14 +278,13 @@ __global__ void test_is_shared(float *x){ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RET]] to ptr -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr -// CHECK-NEXT: store ptr [[TMP0]], ptr [[X_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[X_COERCE:%.*]], ptr [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load ptr, ptr [[X_ASCAST]], align 8 // CHECK-NEXT: store ptr [[X1]], ptr [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[TMP1]]) -// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP2]] to i8 -// CHECK-NEXT: store i8 [[FROMBOOL]], ptr [[RET_ASCAST]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[X_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[TMP0]]) +// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP1]] to i8 +// CHECK-NEXT: store i8 [[STOREDV]], ptr [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_private(int *x){ diff --git a/clang/test/CodeGenCUDA/builtins-spirv-amdgcn.cu b/clang/test/CodeGenCUDA/builtins-spirv-amdgcn.cu index 32851805298f..1cbe358910b8 100644 --- a/clang/test/CodeGenCUDA/builtins-spirv-amdgcn.cu +++ b/clang/test/CodeGenCUDA/builtins-spirv-amdgcn.cu @@ -17,16 +17,15 @@ // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(4) // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast 
ptr [[OUT_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast ptr [[DISPATCH_PTR]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[OUT1]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) addrspace(4) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() -// CHECK-NEXT: store ptr addrspace(4) [[TMP1]], ptr addrspace(4) [[DISPATCH_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[DISPATCH_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[TMP4]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) addrspace(4) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() +// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[DISPATCH_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[DISPATCH_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(4) [[TMP3]], align 4 // CHECK-NEXT: ret void // __global__ void use_dispatch_ptr(int* out) { @@ -42,16 +41,15 @@ __global__ void use_dispatch_ptr(int* out) { // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(4) // CHECK-NEXT: 
[[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr [[OUT_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[QUEUE_PTR_ASCAST:%.*]] = addrspacecast ptr [[QUEUE_PTR]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[OUT1]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call addrspace(4) ptr addrspace(4) @llvm.amdgcn.queue.ptr() -// CHECK-NEXT: store ptr addrspace(4) [[TMP1]], ptr addrspace(4) [[QUEUE_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[QUEUE_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[TMP4]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call addrspace(4) ptr addrspace(4) @llvm.amdgcn.queue.ptr() +// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[QUEUE_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[QUEUE_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(4) [[TMP3]], align 4 // CHECK-NEXT: ret void // __global__ void use_queue_ptr(int* out) { @@ -67,16 +65,15 @@ __global__ void use_queue_ptr(int* out) { // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(4) // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[OUT_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[IMPLICITARG_PTR_ASCAST:%.*]] = addrspacecast ptr [[IMPLICITARG_PTR]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[OUT1]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call addrspace(4) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() -// CHECK-NEXT: store ptr addrspace(4) [[TMP1]], ptr addrspace(4) [[IMPLICITARG_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[IMPLICITARG_PTR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[TMP4]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = call addrspace(4) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() +// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[IMPLICITARG_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[IMPLICITARG_PTR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(4) [[TMP3]], align 4 // CHECK-NEXT: ret void // __global__ void use_implicitarg_ptr(int* out) { @@ -131,16 +128,15 @@ __global__ void test_ds_fadd(float src) { // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr [[SRC_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = 
addrspacecast ptr [[SHARED_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr [[X]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[SHARED_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[SHARED_COERCE:%.*]], ptr addrspace(4) [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store ptr addrspace(4) [[SHARED1]], ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr addrspace(3) -// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP2]], float [[TMP3]] monotonic, align 4 -// CHECK-NEXT: store volatile float [[TMP4]], ptr addrspace(4) [[X_ASCAST]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr addrspace(3) +// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP1]], float [[TMP2]] monotonic, align 4 +// CHECK-NEXT: store volatile float [[TMP3]], ptr addrspace(4) [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ void test_ds_fmin(float src, float *shared) { @@ -175,17 +171,16 @@ __global__ void endpgm() { // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr [[OUT_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr [[A_ADDR]] to ptr addrspace(4) // 
CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr [[B_ADDR]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[OUT1]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[A:%.*]], ptr addrspace(4) [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[B:%.*]], ptr addrspace(4) [[B_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(4) [[A_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) [[B_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = call addrspace(4) i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35) -// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i64 [[TMP3]], ptr addrspace(4) [[TMP4]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) [[A_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(4) [[B_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = call addrspace(4) i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP0]], i64 [[TMP1]], i32 35) +// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i64 [[TMP2]], ptr addrspace(4) [[TMP3]], align 8 // CHECK-NEXT: ret void // __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b) @@ -201,13 +196,12 @@ __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, un // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(4), align 8 // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast ptr [[OUT]] to 
ptr addrspace(4) // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr [[OUT_ADDR]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[OUT_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[OUT_COERCE:%.*]], ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[OUT1]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = call addrspace(4) i64 @llvm.amdgcn.s.memtime() -// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 -// CHECK-NEXT: store i64 [[TMP1]], ptr addrspace(4) [[TMP2]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = call addrspace(4) i64 @llvm.amdgcn.s.memtime() +// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8 +// CHECK-NEXT: store i64 [[TMP0]], ptr addrspace(4) [[TMP1]], align 8 // CHECK-NEXT: ret void // __global__ void test_s_memtime(unsigned long long* out) @@ -228,18 +222,17 @@ __device__ void func(float *x); // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr [[SRC_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast ptr [[SHARED_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr [[X]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[SHARED_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[SHARED_COERCE:%.*]], ptr addrspace(4) [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: 
store ptr addrspace(4) [[SHARED1]], ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr addrspace(3) -// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP2]], float [[TMP3]] monotonic, align 4 -// CHECK-NEXT: store volatile float [[TMP4]], ptr addrspace(4) [[X_ASCAST]], align 4 -// CHECK-NEXT: [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 -// CHECK-NEXT: call spir_func addrspace(4) void @_Z4funcPf(ptr addrspace(4) noundef [[TMP5]]) #[[ATTR6:[0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr addrspace(3) +// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(4) [[SRC_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw fmin ptr addrspace(3) [[TMP1]], float [[TMP2]] monotonic, align 4 +// CHECK-NEXT: store volatile float [[TMP3]], ptr addrspace(4) [[X_ASCAST]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[SHARED_ADDR_ASCAST]], align 8 +// CHECK-NEXT: call spir_func addrspace(4) void @_Z4funcPf(ptr addrspace(4) noundef [[TMP4]]) #[[ATTR6:[0-9]+]] // CHECK-NEXT: ret void // __global__ void test_ds_fmin_func(float src, float *__restrict shared) { @@ -255,15 +248,14 @@ __global__ void test_ds_fmin_func(float src, float *__restrict shared) { // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr [[X]] to ptr addrspace(4) // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr [[X_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast ptr [[RET]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr 
addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[X_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[X_COERCE:%.*]], ptr addrspace(4) [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[X1]], ptr addrspace(4) [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr -// CHECK-NEXT: [[TMP3:%.*]] = call addrspace(4) i1 @llvm.amdgcn.is.shared(ptr [[TMP2]]) -// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8 -// CHECK-NEXT: store i8 [[FROMBOOL]], ptr addrspace(4) [[RET_ASCAST]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr +// CHECK-NEXT: [[TMP2:%.*]] = call addrspace(4) i1 @llvm.amdgcn.is.shared(ptr [[TMP1]]) +// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP2]] to i8 +// CHECK-NEXT: store i8 [[STOREDV]], ptr addrspace(4) [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_shared(float *x){ @@ -278,15 +270,14 @@ __global__ void test_is_shared(float *x){ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast ptr [[X]] to ptr addrspace(4) // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr [[X_ADDR]] to ptr addrspace(4) // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast ptr [[RET]] to ptr addrspace(4) -// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr addrspace(4) -// CHECK-NEXT: store ptr addrspace(4) [[TMP0]], ptr addrspace(4) [[X_ASCAST]], align 8 +// CHECK-NEXT: store ptr addrspace(1) [[X_COERCE:%.*]], ptr addrspace(4) [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ASCAST]], align 8 // CHECK-NEXT: store ptr addrspace(4) [[X1]], ptr addrspace(4) 
[[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ADDR_ASCAST]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr -// CHECK-NEXT: [[TMP3:%.*]] = call addrspace(4) i1 @llvm.amdgcn.is.private(ptr [[TMP2]]) -// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8 -// CHECK-NEXT: store i8 [[FROMBOOL]], ptr addrspace(4) [[RET_ASCAST]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[X_ADDR_ASCAST]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr +// CHECK-NEXT: [[TMP2:%.*]] = call addrspace(4) i1 @llvm.amdgcn.is.private(ptr [[TMP1]]) +// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP2]] to i8 +// CHECK-NEXT: store i8 [[STOREDV]], ptr addrspace(4) [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_private(int *x){ diff --git a/clang/test/CodeGenCXX/address-space-cast-coerce.cpp b/clang/test/CodeGenCXX/address-space-cast-coerce.cpp index 7279b6c7f23a..1ad46042b6ef 100644 --- a/clang/test/CodeGenCXX/address-space-cast-coerce.cpp +++ b/clang/test/CodeGenCXX/address-space-cast-coerce.cpp @@ -46,9 +46,9 @@ int mane() { char1 f1{1}; char1 f2{1}; -// CHECK: [[TMP:%.+]] = alloca i16 -// CHECK: [[COERCE:%.+]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr -// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %{{.+}}, ptr align 2 [[COERCE]], i64 1, i1 false) +// CHECK: [[CALL:%.*]] = call i16 +// CHECK: [[TRUNC:%.*]] = trunc i16 [[CALL]] to i8 +// CHECK: store i8 [[TRUNC]] char1 f3 = f1 + f2; } diff --git a/clang/test/CodeGenCXX/cxx2a-consteval.cpp b/clang/test/CodeGenCXX/cxx2a-consteval.cpp index 075cab58358a..6c09053a74d2 100644 --- a/clang/test/CodeGenCXX/cxx2a-consteval.cpp +++ b/clang/test/CodeGenCXX/cxx2a-consteval.cpp @@ -1,4 +1,3 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -emit-llvm %s -std=c++2a -triple x86_64-unknown-linux-gnu -o 
%t.ll // RUN: FileCheck -check-prefix=EVAL -input-file=%t.ll %s // RUN: FileCheck -check-prefix=EVAL-STATIC -input-file=%t.ll %s @@ -275,3 +274,26 @@ void f() { // EVAL-FN: call void @_ZN7GH821542S3C2Ei } } + +namespace GH93040 { +struct C { char c = 1; }; +struct Empty { consteval Empty() {} }; +struct Empty2 { consteval Empty2() {} }; +struct Test : C, Empty { + [[no_unique_address]] Empty2 e; +}; +static_assert(sizeof(Test) == 1); +void f() { + Test test; + +// Make sure we don't overwrite the initialization of c. + +// EVAL-FN-LABEL: define {{.*}} void @_ZN7GH930404TestC2Ev +// EVAL-FN: entry: +// EVAL-FN-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// EVAL-FN-NEXT: store ptr {{.*}}, ptr [[THIS_ADDR]], align 8 +// EVAL-FN-NEXT: [[THIS:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// EVAL-FN-NEXT: call void @_ZN7GH930401CC2Ev(ptr noundef nonnull align 1 dereferenceable(1) [[THIS]]) +// EVAL-FN-NEXT: ret void +} +} diff --git a/clang/test/CodeGenCXX/trivial_abi.cpp b/clang/test/CodeGenCXX/trivial_abi.cpp index 3012b0f2bc33..54912a617c28 100644 --- a/clang/test/CodeGenCXX/trivial_abi.cpp +++ b/clang/test/CodeGenCXX/trivial_abi.cpp @@ -262,6 +262,26 @@ void testExceptionLarge() { calleeExceptionLarge(Large(), Large()); } +// CHECK: define void @_ZN7GH930401gEPNS_1SE +// CHECK: [[CALL:%.*]] = call i64 @_ZN7GH930401fEv +// CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[CALL]] to i56 +// CHECK-NEXT: store i56 [[TRUNC]] +// CHECK-NEXT: ret void +void* operator new(unsigned long, void*); +namespace GH93040 { +struct [[clang::trivial_abi]] S { + char a; + int x; + __attribute((aligned(2))) char y; + S(); +} __attribute((packed)); +S f(); +void g(S* s) { new(s) S(f()); } +struct S2 { [[no_unique_address]] S s; char c;}; +static_assert(sizeof(S) == 8 && sizeof(S2) == 8, ""); +} + + // PR42961 // CHECK: define{{.*}} @"_ZN3$_08__invokeEv"() diff --git a/clang/test/CodeGenHIP/dpp-const-fold.hip b/clang/test/CodeGenHIP/dpp-const-fold.hip index f5a97c6b0e77..c5450ec4b841 100644 --- 
a/clang/test/CodeGenHIP/dpp-const-fold.hip +++ b/clang/test/CodeGenHIP/dpp-const-fold.hip @@ -22,25 +22,25 @@ constexpr static bool BountCtrl() return true & false; } -// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 16, i32 0, i32 0, i1 false) +// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 16, i32 0, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_2(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, OpCtrl(), 0, 0, false); } -// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 4, i32 0, i1 false) +// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 4, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_3(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, RowMask(), 0, false); } -// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 0, i32 3, i1 false) +// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 0, i32 3, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_4(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, 0, BankMask(), false); } -// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 0, i32 0, i1 false) +// CHECK: call i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 0, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_5(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, 0, 0, BountCtrl()); diff --git a/clang/test/CodeGenHIP/spirv-amdgcn-dpp-const-fold.hip b/clang/test/CodeGenHIP/spirv-amdgcn-dpp-const-fold.hip index 2b785200e8ee..71270bc1c68d 100644 --- a/clang/test/CodeGenHIP/spirv-amdgcn-dpp-const-fold.hip +++ b/clang/test/CodeGenHIP/spirv-amdgcn-dpp-const-fold.hip @@ -21,25 +21,25 @@ constexpr static bool BountCtrl() return true & false; } -// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 
%1, i32 %2, i32 16, i32 0, i32 0, i1 false) +// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 16, i32 0, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_2(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, OpCtrl(), 0, 0, false); } -// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 4, i32 0, i1 false) +// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 4, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_3(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, RowMask(), 0, false); } -// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 0, i32 3, i1 false) +// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 0, i32 3, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_4(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, 0, BankMask(), false); } -// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %1, i32 %2, i32 0, i32 0, i32 0, i1 false) +// CHECK: call{{.*}} i32 @llvm.amdgcn.update.dpp.i32(i32 %0, i32 %1, i32 0, i32 0, i32 0, i1 false) __attribute__((global)) void test_update_dpp_const_fold_imm_operand_5(int* out, int a, int b) { *out = __builtin_amdgcn_update_dpp(a, b, 0, 0, 0, BountCtrl()); diff --git a/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl b/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl index 385f8a753cd8..1651cb379a20 100644 --- a/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl +++ b/clang/test/CodeGenOpenCL/addr-space-struct-arg.cl @@ -145,7 +145,9 @@ kernel void KernelOneMemberSpir(global struct StructOneMember* u) { // AMDGCN-LABEL: define{{.*}} amdgpu_kernel void @KernelLargeOneMember( // AMDGCN: %[[U:.*]] = alloca %struct.LargeStructOneMember, align 8, addrspace(5) -// AMDGCN: store %struct.LargeStructOneMember %u.coerce, ptr addrspace(5) %[[U]], 
align 8 +// AMDGCN: %[[U_ELEM:.*]] = getelementptr inbounds %struct.LargeStructOneMember, ptr addrspace(5) %[[U]], i32 0, i32 0 +// AMDGCN: %[[EXTRACT:.*]] = extractvalue %struct.LargeStructOneMember %u.coerce, 0 +// AMDGCN: store [100 x <2 x i32>] %[[EXTRACT]], ptr addrspace(5) %[[U_ELEM]], align 8 // AMDGCN: call void @FuncOneLargeMember(ptr addrspace(5) noundef byref(%struct.LargeStructOneMember) align 8 %[[U]]) kernel void KernelLargeOneMember(struct LargeStructOneMember u) { FuncOneLargeMember(u); @@ -177,7 +179,12 @@ kernel void KernelTwoMember(struct StructTwoMember u) { // AMDGCN-LABEL: define{{.*}} amdgpu_kernel void @KernelLargeTwoMember // AMDGCN-SAME: (%struct.LargeStructTwoMember %[[u_coerce:.*]]) // AMDGCN: %[[u:.*]] = alloca %struct.LargeStructTwoMember, align 8, addrspace(5) -// AMDGCN: store %struct.LargeStructTwoMember %[[u_coerce]], ptr addrspace(5) %[[u]] +// AMDGCN: %[[U_PTR0:.*]] = getelementptr inbounds %struct.LargeStructTwoMember, ptr addrspace(5) %[[u]], i32 0, i32 0 +// AMDGCN: %[[EXTRACT0:.*]] = extractvalue %struct.LargeStructTwoMember %u.coerce, 0 +// AMDGCN: store [40 x <2 x i32>] %[[EXTRACT0]], ptr addrspace(5) %[[U_PTR0]] +// AMDGCN: %[[U_PTR1:.*]] = getelementptr inbounds %struct.LargeStructTwoMember, ptr addrspace(5) %[[u]], i32 0, i32 1 +// AMDGCN: %[[EXTRACT1:.*]] = extractvalue %struct.LargeStructTwoMember %u.coerce, 1 +// AMDGCN: store [20 x <2 x i32>] %[[EXTRACT1]], ptr addrspace(5) %[[U_PTR1]] // AMDGCN: call void @FuncLargeTwoMember(ptr addrspace(5) noundef byref(%struct.LargeStructTwoMember) align 8 %[[u]]) kernel void KernelLargeTwoMember(struct LargeStructTwoMember u) { FuncLargeTwoMember(u); diff --git a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl index fa83a38a01b0..fe0a2f9578db 100644 --- a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl +++ b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-arg-byref.cl @@ -61,7 +61,7 @@ Mat4X4 
__attribute__((noinline)) foo(Mat3X3 in) { // the return value. // AMDGCN-LABEL: define dso_local amdgpu_kernel void @ker -// AMDGCN-SAME: (ptr addrspace(1) noundef align 4 [[IN:%.*]], ptr addrspace(1) noundef align 4 [[OUT:%.*]]) #[[ATTR1:[0-9]+]] !kernel_arg_addr_space !4 !kernel_arg_access_qual !5 !kernel_arg_type !6 !kernel_arg_base_type !6 !kernel_arg_type_qual !7 { +// AMDGCN-SAME: (ptr addrspace(1) noundef align 4 [[IN:%.*]], ptr addrspace(1) noundef align 4 [[OUT:%.*]]) #[[ATTR1:[0-9]+]] !kernel_arg_addr_space [[META4:![0-9]+]] !kernel_arg_access_qual [[META5:![0-9]+]] !kernel_arg_type [[META6:![0-9]+]] !kernel_arg_base_type [[META6]] !kernel_arg_type_qual [[META7:![0-9]+]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[IN_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5) // AMDGCN-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5) @@ -74,7 +74,7 @@ Mat4X4 __attribute__((noinline)) foo(Mat3X3 in) { // AMDGCN-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [[STRUCT_MAT3X3:%.*]], ptr addrspace(1) [[TMP1]], i64 1 // AMDGCN-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_MAT3X3]], ptr addrspace(1) [[ARRAYIDX1]], i32 0, i32 0 // AMDGCN-NEXT: [[TMP3:%.*]] = load [9 x i32], ptr addrspace(1) [[TMP2]], align 4 -// AMDGCN-NEXT: [[CALL:%.*]] = call [[STRUCT_MAT4X4]] @foo([9 x i32] [[TMP3]]) #[[ATTR3:[0-9]+]] +// AMDGCN-NEXT: [[CALL:%.*]] = call [[STRUCT_MAT4X4]] @[[FOO:[a-zA-Z0-9_$\"\\.-]*[a-zA-Z_$\"\\.-][a-zA-Z0-9_$\"\\.-]*]]([9 x i32] [[TMP3]]) #[[ATTR3:[0-9]+]] // AMDGCN-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_MAT4X4]], ptr addrspace(5) [[TMP]], i32 0, i32 0 // AMDGCN-NEXT: [[TMP5:%.*]] = extractvalue [[STRUCT_MAT4X4]] [[CALL]], 0 // AMDGCN-NEXT: store [16 x i32] [[TMP5]], ptr addrspace(5) [[TMP4]], align 4 @@ -98,7 +98,7 @@ Mat64X64 __attribute__((noinline)) foo_large(Mat32X32 in) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @ker_large -// AMDGCN-SAME: (ptr addrspace(1) noundef align 4 [[IN:%.*]], ptr 
addrspace(1) noundef align 4 [[OUT:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !4 !kernel_arg_access_qual !5 !kernel_arg_type !8 !kernel_arg_base_type !8 !kernel_arg_type_qual !7 { +// AMDGCN-SAME: (ptr addrspace(1) noundef align 4 [[IN:%.*]], ptr addrspace(1) noundef align 4 [[OUT:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META4]] !kernel_arg_access_qual [[META5]] !kernel_arg_type [[META8:![0-9]+]] !kernel_arg_base_type [[META8]] !kernel_arg_type_qual [[META7]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[IN_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5) // AMDGCN-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5) @@ -168,7 +168,7 @@ void test_indirect_arg_globl(void) { #endif // AMDGCN-LABEL: define dso_local amdgpu_kernel void @test_indirect_arg_local -// AMDGCN-SAME: () #[[ATTR1]] !kernel_arg_addr_space !9 !kernel_arg_access_qual !9 !kernel_arg_type !9 !kernel_arg_base_type !9 !kernel_arg_type_qual !9 { +// AMDGCN-SAME: () #[[ATTR1]] !kernel_arg_addr_space [[META9:![0-9]+]] !kernel_arg_access_qual [[META9]] !kernel_arg_type [[META9]] !kernel_arg_base_type [[META9]] !kernel_arg_type_qual [[META9]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_LARGESTRUCTONEMEMBER:%.*]], align 8, addrspace(5) // AMDGCN-NEXT: call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) align 8 [[BYVAL_TEMP]], ptr addrspace(3) align 8 @test_indirect_arg_local.l_s, i64 800, i1 false) @@ -193,7 +193,7 @@ void test_indirect_arg_private(void) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @KernelOneMember -// AMDGCN-SAME: (<2 x i32> [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !10 !kernel_arg_access_qual !11 !kernel_arg_type !12 !kernel_arg_base_type !12 !kernel_arg_type_qual !13 { +// AMDGCN-SAME: (<2 x i32> [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META10:![0-9]+]] !kernel_arg_access_qual [[META11:![0-9]+]] !kernel_arg_type [[META12:![0-9]+]] !kernel_arg_base_type [[META12]] !kernel_arg_type_qual 
[[META13:![0-9]+]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[U:%.*]] = alloca [[STRUCT_STRUCTONEMEMBER:%.*]], align 8, addrspace(5) // AMDGCN-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_STRUCTONEMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 0 @@ -208,7 +208,7 @@ kernel void KernelOneMember(struct StructOneMember u) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @KernelOneMemberSpir -// AMDGCN-SAME: (ptr addrspace(1) noundef align 8 [[U:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !14 !kernel_arg_access_qual !11 !kernel_arg_type !15 !kernel_arg_base_type !15 !kernel_arg_type_qual !13 { +// AMDGCN-SAME: (ptr addrspace(1) noundef align 8 [[U:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META14:![0-9]+]] !kernel_arg_access_qual [[META11]] !kernel_arg_type [[META15:![0-9]+]] !kernel_arg_base_type [[META15]] !kernel_arg_type_qual [[META13]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[U_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5) // AMDGCN-NEXT: store ptr addrspace(1) [[U]], ptr addrspace(5) [[U_ADDR]], align 8 @@ -223,10 +223,12 @@ kernel void KernelOneMemberSpir(global struct StructOneMember* u) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @KernelLargeOneMember -// AMDGCN-SAME: ([[STRUCT_LARGESTRUCTONEMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !10 !kernel_arg_access_qual !11 !kernel_arg_type !16 !kernel_arg_base_type !16 !kernel_arg_type_qual !13 { +// AMDGCN-SAME: ([[STRUCT_LARGESTRUCTONEMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META10]] !kernel_arg_access_qual [[META11]] !kernel_arg_type [[META16:![0-9]+]] !kernel_arg_base_type [[META16]] !kernel_arg_type_qual [[META13]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[U:%.*]] = alloca [[STRUCT_LARGESTRUCTONEMEMBER]], align 8, addrspace(5) -// AMDGCN-NEXT: store [[STRUCT_LARGESTRUCTONEMEMBER]] [[U_COERCE]], ptr addrspace(5) [[U]], align 8 +// AMDGCN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_LARGESTRUCTONEMEMBER]], ptr 
addrspace(5) [[U]], i32 0, i32 0 +// AMDGCN-NEXT: [[TMP1:%.*]] = extractvalue [[STRUCT_LARGESTRUCTONEMEMBER]] [[U_COERCE]], 0 +// AMDGCN-NEXT: store [100 x <2 x i32>] [[TMP1]], ptr addrspace(5) [[TMP0]], align 8 // AMDGCN-NEXT: call void @FuncOneLargeMember(ptr addrspace(5) noundef byref([[STRUCT_LARGESTRUCTONEMEMBER]]) align 8 [[U]]) #[[ATTR3]] // AMDGCN-NEXT: ret void // @@ -271,15 +273,20 @@ void FuncLargeTwoMember(struct LargeStructTwoMember u) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @KernelTwoMember -// AMDGCN-SAME: ([[STRUCT_STRUCTTWOMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !10 !kernel_arg_access_qual !11 !kernel_arg_type !17 !kernel_arg_base_type !17 !kernel_arg_type_qual !13 { +// AMDGCN-SAME: ([[STRUCT_STRUCTTWOMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META10]] !kernel_arg_access_qual [[META11]] !kernel_arg_type [[META17:![0-9]+]] !kernel_arg_base_type [[META17]] !kernel_arg_type_qual [[META13]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[U:%.*]] = alloca [[STRUCT_STRUCTTWOMEMBER]], align 8, addrspace(5) -// AMDGCN-NEXT: store [[STRUCT_STRUCTTWOMEMBER]] [[U_COERCE]], ptr addrspace(5) [[U]], align 8 // AMDGCN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_STRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 0 -// AMDGCN-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(5) [[TMP0]], align 8 +// AMDGCN-NEXT: [[TMP1:%.*]] = extractvalue [[STRUCT_STRUCTTWOMEMBER]] [[U_COERCE]], 0 +// AMDGCN-NEXT: store <2 x i32> [[TMP1]], ptr addrspace(5) [[TMP0]], align 8 // AMDGCN-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_STRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 1 -// AMDGCN-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr addrspace(5) [[TMP2]], align 8 -// AMDGCN-NEXT: call void @FuncTwoMember(<2 x i32> [[TMP1]], <2 x i32> [[TMP3]]) #[[ATTR3]] +// AMDGCN-NEXT: [[TMP3:%.*]] = extractvalue [[STRUCT_STRUCTTWOMEMBER]] [[U_COERCE]], 1 +// AMDGCN-NEXT: store <2 x i32> [[TMP3]], ptr 
addrspace(5) [[TMP2]], align 8 +// AMDGCN-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_STRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 0 +// AMDGCN-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr addrspace(5) [[TMP4]], align 8 +// AMDGCN-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_STRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 1 +// AMDGCN-NEXT: [[TMP7:%.*]] = load <2 x i32>, ptr addrspace(5) [[TMP6]], align 8 +// AMDGCN-NEXT: call void @FuncTwoMember(<2 x i32> [[TMP5]], <2 x i32> [[TMP7]]) #[[ATTR3]] // AMDGCN-NEXT: ret void // kernel void KernelTwoMember(struct StructTwoMember u) { @@ -287,10 +294,15 @@ kernel void KernelTwoMember(struct StructTwoMember u) { } // AMDGCN-LABEL: define dso_local amdgpu_kernel void @KernelLargeTwoMember -// AMDGCN-SAME: ([[STRUCT_LARGESTRUCTTWOMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space !10 !kernel_arg_access_qual !11 !kernel_arg_type !18 !kernel_arg_base_type !18 !kernel_arg_type_qual !13 { +// AMDGCN-SAME: ([[STRUCT_LARGESTRUCTTWOMEMBER:%.*]] [[U_COERCE:%.*]]) #[[ATTR1]] !kernel_arg_addr_space [[META10]] !kernel_arg_access_qual [[META11]] !kernel_arg_type [[META18:![0-9]+]] !kernel_arg_base_type [[META18]] !kernel_arg_type_qual [[META13]] { // AMDGCN-NEXT: entry: // AMDGCN-NEXT: [[U:%.*]] = alloca [[STRUCT_LARGESTRUCTTWOMEMBER]], align 8, addrspace(5) -// AMDGCN-NEXT: store [[STRUCT_LARGESTRUCTTWOMEMBER]] [[U_COERCE]], ptr addrspace(5) [[U]], align 8 +// AMDGCN-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_LARGESTRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 0 +// AMDGCN-NEXT: [[TMP1:%.*]] = extractvalue [[STRUCT_LARGESTRUCTTWOMEMBER]] [[U_COERCE]], 0 +// AMDGCN-NEXT: store [40 x <2 x i32>] [[TMP1]], ptr addrspace(5) [[TMP0]], align 8 +// AMDGCN-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_LARGESTRUCTTWOMEMBER]], ptr addrspace(5) [[U]], i32 0, i32 1 +// AMDGCN-NEXT: [[TMP3:%.*]] = extractvalue [[STRUCT_LARGESTRUCTTWOMEMBER]] [[U_COERCE]], 1 +// AMDGCN-NEXT: store [20 
x <2 x i32>] [[TMP3]], ptr addrspace(5) [[TMP2]], align 8 // AMDGCN-NEXT: call void @FuncLargeTwoMember(ptr addrspace(5) noundef byref([[STRUCT_LARGESTRUCTTWOMEMBER]]) align 8 [[U]]) #[[ATTR3]] // AMDGCN-NEXT: ret void // From e657e0256509f6f665917904078a5389684fc716 Mon Sep 17 00:00:00 2001 From: Tom Stellard Date: Fri, 26 Jul 2024 07:38:53 -0700 Subject: [PATCH 02/29] workflows: Fix tag name for release sources job (#100752) (cherry picked from commit 3c2ce7088886a22ab8dc0e9488600c43644b5102) --- .github/workflows/release-sources.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-sources.yml b/.github/workflows/release-sources.yml index edb0449ef7e2..a6c86823f99d 100644 --- a/.github/workflows/release-sources.yml +++ b/.github/workflows/release-sources.yml @@ -51,7 +51,7 @@ jobs: steps: - id: inputs run: | - ref=${{ inputs.release-version || github.sha }} + ref=${{ (inputs.release-version && format('llvmorg-{0}', inputs.release-version)) || github.sha }} if [ -n "${{ inputs.release-version }}" ]; then export_args="-release ${{ inputs.release-version }} -final" else From 82a11e46ce87ea570358e4c25ee445929402a490 Mon Sep 17 00:00:00 2001 From: cor3ntin Date: Wed, 4 Sep 2024 10:02:55 +0200 Subject: [PATCH 03/29] [Clang] Workaround dependent source location issues (#106925) In #78436 we made some SourceLocExpr dependent to deal with the fact that their value should reflect the name of specialized function - rather than the template in which they are first used. However SourceLocExpr are unusual in two ways - They don't depend on template arguments - They morally depend on the context in which they are used (rather than called from). It's fair to say that this is quite novel and confuses clang. In particular, in some cases, we used to create dependent SourceLocExpr and never subsequently transform them, leaving dependent objects in instantiated function types.
To work around that we avoid replacing SourceLocExpr when we think they could remain dependent. It's certainly not perfect but it fixes a number of reported bugs, and seems to only affect scenarios in which the value of the SourceLocExpr does not matter (overload resolution). Fixes #106428 Fixes #81155 Fixes #80210 Fixes #85373 --------- Co-authored-by: Aaron Ballman --- clang/docs/ReleaseNotes.rst | 2 + clang/lib/Sema/SemaExpr.cpp | 21 +++++++-- clang/test/SemaCXX/source_location.cpp | 60 ++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 4 deletions(-) diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 5eed9827343d..53d819c6c445 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -1121,6 +1121,8 @@ Bug Fixes to C++ Support Fixes (#GH85992). - Fixed a crash-on-invalid bug involving extraneous template parameter with concept substitution. (#GH73885) - Fixed assertion failure by skipping the analysis of an invalid field declaration. (#GH99868) +- Fix an issue with dependent source location expressions (#GH106428), (#GH81155), (#GH80210), (#GH85373) + Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index edb8b79a2220..f56ca398cda8 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -5430,11 +5430,24 @@ struct EnsureImmediateInvocationInDefaultArgs // Rewrite to source location to refer to the context in which they are used.
ExprResult TransformSourceLocExpr(SourceLocExpr *E) { - if (E->getParentContext() == SemaRef.CurContext) + DeclContext *DC = E->getParentContext(); + if (DC == SemaRef.CurContext) return E; - return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getType(), - E->getBeginLoc(), E->getEndLoc(), - SemaRef.CurContext); + + // FIXME: During instantiation, because the rebuild of default arguments + // is not always done in the context of the template instantiator, + // we run the risk of producing a dependent source location + // that would never be rebuilt. + // This usually happens during overload resolution, or in contexts + // where the value of the source location does not matter. + // However, we should find a better way to deal with source location + // of function templates. + if (!SemaRef.CurrentInstantiationScope || + !SemaRef.CurContext->isDependentContext() || DC->isDependentContext()) + DC = SemaRef.CurContext; + + return getDerived().RebuildSourceLocExpr( + E->getIdentKind(), E->getType(), E->getBeginLoc(), E->getEndLoc(), DC); } }; diff --git a/clang/test/SemaCXX/source_location.cpp b/clang/test/SemaCXX/source_location.cpp index 6b3610d703e7..34177bfe287f 100644 --- a/clang/test/SemaCXX/source_location.cpp +++ b/clang/test/SemaCXX/source_location.cpp @@ -929,3 +929,63 @@ void test() { } } + +namespace GH106428 { + +struct add_fn { + template + constexpr auto operator()(T lhs, T rhs, + const std::source_location loc = std::source_location::current()) + const -> T + { + return lhs + rhs; + } +}; + + +template +decltype(_Fp{}(0, 0)) +__invoke(_Fp&& __f); + +template +struct type_identity { using type = T; }; + +template +struct invoke_result : type_identity {}; + +using i = invoke_result::type; +static_assert(__is_same(i, int)); + +} + +#if __cplusplus >= 202002L + +namespace GH81155 { +struct buff { + buff(buff &, const char * = __builtin_FUNCTION()); +}; + +template +Ty declval(); + +template +auto Call(buff arg) -> decltype(Fx{}(arg)); + +template
+struct F {}; + +template +struct InvocableR : F(declval()))> { + static constexpr bool value = false; +}; + +template ::value> +void Help(Fx) {} + +void Test() { + Help([](buff) {}); +} + +} + +#endif From 5cf78453b3de39247364ddf97b1c18c011283948 Mon Sep 17 00:00:00 2001 From: Yingwei Zheng Date: Wed, 4 Sep 2024 13:36:32 +0800 Subject: [PATCH 04/29] [Clang][CodeGen] Don't emit assumptions if current block is unreachable. (#106936) Fixes https://github.com/llvm/llvm-project/issues/106898. When emitting an infinite loop, clang codegen will delete the whole block and leave builder's current block as nullptr: https://github.com/llvm/llvm-project/blob/837ee5b46a5f7f898f0de7e46a19600b896a0a1f/clang/lib/CodeGen/CGStmt.cpp#L597-L600 Then clang will create `zext (icmp slt %a, %b)` without parent block for `a < b`. It will crash here: https://github.com/llvm/llvm-project/blob/837ee5b46a5f7f898f0de7e46a19600b896a0a1f/clang/lib/CodeGen/CGExprScalar.cpp#L416-L420 Even if we disabled this optimization, it still crashes in `Builder.CreateAssumption`: https://github.com/llvm/llvm-project/blob/837ee5b46a5f7f898f0de7e46a19600b896a0a1f/llvm/lib/IR/IRBuilder.cpp#L551-L561 This patch disables assumptions emission if current block is null. 
(cherry picked from commit c94bd96c277e0b48e198fdc831bb576d9a04aced) --- clang/lib/CodeGen/CGStmt.cpp | 2 +- clang/test/SemaCXX/cxx23-assume.cpp | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index aa97f685ac7a..2f466602d2f6 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -745,7 +745,7 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) { } break; case attr::CXXAssume: { const Expr *Assumption = cast(A)->getAssumption(); - if (getLangOpts().CXXAssumptions && + if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() && !Assumption->HasSideEffects(getContext())) { llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption); Builder.CreateAssumption(AssumptionVal); diff --git a/clang/test/SemaCXX/cxx23-assume.cpp b/clang/test/SemaCXX/cxx23-assume.cpp index 9138501d726d..eeae59daea3f 100644 --- a/clang/test/SemaCXX/cxx23-assume.cpp +++ b/clang/test/SemaCXX/cxx23-assume.cpp @@ -158,3 +158,12 @@ foo (int x, int y) return x + y; } } + +// Do not crash when assumptions are unreachable. +namespace gh106898 { +int foo () { + while(1); + int a = 0, b = 1; + __attribute__((assume (a < b))); +} +} From 52e5a72e9200667e8a62436268fdaff4411f7216 Mon Sep 17 00:00:00 2001 From: Sander de Smalen Date: Thu, 5 Sep 2024 17:54:57 +0100 Subject: [PATCH 05/29] [AArch64] Remove redundant COPY from loadRegFromStackSlot (#107396) This removes a redundant 'COPY' instruction that #81716 probably forgot to remove. This redundant COPY led to an issue because code in LiveRangeSplitting expects that the instruction emitted by `loadRegFromStackSlot` is an instruction that accesses memory, which isn't the case for the COPY instruction.
(cherry picked from commit 91a3c6f3d66b866bcda8a0f7d4815bc8f2dbd86c) --- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 4 --- llvm/test/CodeGen/AArch64/spillfill-sve.mir | 37 +++++++++++++++++++- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 377bcd5868fb..805684ef69a5 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -5144,10 +5144,6 @@ void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, if (PNRReg.isValid() && !PNRReg.isVirtual()) MI.addDef(PNRReg, RegState::Implicit); MI.addMemOperand(MMO); - - if (PNRReg.isValid() && PNRReg.isVirtual()) - BuildMI(MBB, MBBI, DebugLoc(), get(TargetOpcode::COPY), PNRReg) - .addReg(DestReg); } bool llvm::isNZCVTouchedInInstructionRange(const MachineInstr &DefMI, diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir index 11cf388e3853..83c9b73c5757 100644 --- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir +++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir @@ -11,6 +11,7 @@ define aarch64_sve_vector_pcs void @spills_fills_stack_id_ppr2mul2() #0 { entry: unreachable } define aarch64_sve_vector_pcs void @spills_fills_stack_id_pnr() #1 { entry: unreachable } define aarch64_sve_vector_pcs void @spills_fills_stack_id_virtreg_pnr() #1 { entry: unreachable } + define aarch64_sve_vector_pcs void @spills_fills_stack_id_virtreg_ppr_to_pnr() #1 { entry: unreachable } define aarch64_sve_vector_pcs void @spills_fills_stack_id_zpr() #0 { entry: unreachable } define aarch64_sve_vector_pcs void @spills_fills_stack_id_zpr2() #0 { entry: unreachable } define aarch64_sve_vector_pcs void @spills_fills_stack_id_zpr2strided() #0 { entry: unreachable } @@ -216,7 +217,7 @@ body: | ; EXPAND: STR_PXI killed renamable $pn8, $sp, 7 ; ; EXPAND: renamable $pn8 = LDR_PXI $sp, 7 - ; EXPAND: $p0 = PEXT_PCI_B killed renamable $pn8, 
0 + ; EXPAND-NEXT: $p0 = PEXT_PCI_B killed renamable $pn8, 0 %0:pnr_p8to15 = WHILEGE_CXX_B undef $x0, undef $x0, 0, implicit-def dead $nzcv @@ -242,6 +243,40 @@ body: | RET_ReallyLR ... --- +name: spills_fills_stack_id_virtreg_ppr_to_pnr +tracksRegLiveness: true +registers: + - { id: 0, class: ppr } + - { id: 1, class: pnr_p8to15 } +stack: +body: | + bb.0.entry: + liveins: $p0 + + %0:ppr = COPY $p0 + + $pn0 = IMPLICIT_DEF + $pn1 = IMPLICIT_DEF + $pn2 = IMPLICIT_DEF + $pn3 = IMPLICIT_DEF + $pn4 = IMPLICIT_DEF + $pn5 = IMPLICIT_DEF + $pn6 = IMPLICIT_DEF + $pn7 = IMPLICIT_DEF + $pn8 = IMPLICIT_DEF + $pn9 = IMPLICIT_DEF + $pn10 = IMPLICIT_DEF + $pn11 = IMPLICIT_DEF + $pn12 = IMPLICIT_DEF + $pn13 = IMPLICIT_DEF + $pn14 = IMPLICIT_DEF + $pn15 = IMPLICIT_DEF + + %1:pnr_p8to15 = COPY %0 + $p0 = PEXT_PCI_B %1, 0 + RET_ReallyLR +... +--- name: spills_fills_stack_id_zpr tracksRegLiveness: true registers: From 64015eee93062b34df290338c45e87868fa750a9 Mon Sep 17 00:00:00 2001 From: Hans Wennborg Date: Mon, 9 Sep 2024 10:56:37 +0200 Subject: [PATCH 06/29] Release note about targets built in the Windows packages LLVM_TARGETS_TO_BUILD was set in #106059 --- llvm/docs/ReleaseNotes.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index ac7bdf723a16..7b9e15da9b2b 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -42,6 +42,10 @@ Non-comprehensive list of changes in this release functionality, or simply have a lot to talk about), see the `NOTE` below for adding a new subsection. +* Starting with LLVM 19, the Windows installers only include support for the + X86, ARM, and AArch64 targets in order to keep the build size within the + limits of the NSIS installer framework. + * ... 
Update on required toolchains to build LLVM From 11e2a1552f92ccb080d08083ceb71f7e6ed4db78 Mon Sep 17 00:00:00 2001 From: Orlando Cazalet-Hyams Date: Thu, 29 Aug 2024 14:12:02 +0100 Subject: [PATCH 07/29] [RemoveDIs] Fix spliceDebugInfo splice-to-end edge case (#105671) Fix #105571 which demonstrates an end() iterator dereference when performing a non-empty splice to end() from a region that ends at Src::end(). Rather than calling Instruction::adoptDbgRecords from Dest, create a marker (which takes an iterator) and absorbDebugValues onto that. The "absorb" variant doesn't clean up the source marker, which in this case we know is a trailing marker, so we have to do that manually. (cherry picked from commit 43661a1214353ea1773a711f403f8d1118e9ca0f) --- llvm/lib/IR/BasicBlock.cpp | 12 ++++- llvm/unittests/IR/BasicBlockDbgInfoTest.cpp | 52 +++++++++++++++++++++ 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp index 0a9498f051cb..46896d3cdf7d 100644 --- a/llvm/lib/IR/BasicBlock.cpp +++ b/llvm/lib/IR/BasicBlock.cpp @@ -975,8 +975,16 @@ void BasicBlock::spliceDebugInfoImpl(BasicBlock::iterator Dest, BasicBlock *Src, if (ReadFromTail && Src->getMarker(Last)) { DbgMarker *FromLast = Src->getMarker(Last); if (LastIsEnd) { - Dest->adoptDbgRecords(Src, Last, true); - // adoptDbgRecords will release any trailers. + if (Dest == end()) { + // Absorb the trailing markers from Src. + assert(FromLast == Src->getTrailingDbgRecords()); + createMarker(Dest)->absorbDebugValues(*FromLast, true); + FromLast->eraseFromParent(); + Src->deleteTrailingDbgRecords(); + } else { + // adoptDbgRecords will release any trailers. + Dest->adoptDbgRecords(Src, Last, true); + } assert(!Src->getTrailingDbgRecords()); } else { // FIXME: can we use adoptDbgRecords here to reduce allocations?
diff --git a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp index 835780e63aaf..5ce14d3f6b9c 100644 --- a/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp +++ b/llvm/unittests/IR/BasicBlockDbgInfoTest.cpp @@ -1525,4 +1525,56 @@ TEST(BasicBlockDbgInfoTest, DbgMoveToEnd) { EXPECT_FALSE(Ret->hasDbgRecords()); } +TEST(BasicBlockDbgInfoTest, CloneTrailingRecordsToEmptyBlock) { + LLVMContext C; + std::unique_ptr M = parseIR(C, R"( + define i16 @foo(i16 %a) !dbg !6 { + entry: + %b = add i16 %a, 0 + #dbg_value(i16 %b, !9, !DIExpression(), !11) + ret i16 0, !dbg !11 + } + + !llvm.dbg.cu = !{!0} + !llvm.module.flags = !{!5} + + !0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) + !1 = !DIFile(filename: "t.ll", directory: "/") + !2 = !{} + !5 = !{i32 2, !"Debug Info Version", i32 3} + !6 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: null, file: !1, line: 1, type: !7, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8) + !7 = !DISubroutineType(types: !2) + !8 = !{!9} + !9 = !DILocalVariable(name: "1", scope: !6, file: !1, line: 1, type: !10) + !10 = !DIBasicType(name: "ty16", size: 16, encoding: DW_ATE_unsigned) + !11 = !DILocation(line: 1, column: 1, scope: !6) +)"); + ASSERT_TRUE(M); + + Function *F = M->getFunction("foo"); + BasicBlock &BB = F->getEntryBlock(); + // Start with no trailing records. + ASSERT_FALSE(BB.getTrailingDbgRecords()); + + BasicBlock::iterator Ret = std::prev(BB.end()); + BasicBlock::iterator B = std::prev(Ret); + + // Delete terminator which has debug records: we now get trailing records. + Ret->eraseFromParent(); + EXPECT_TRUE(BB.getTrailingDbgRecords()); + + BasicBlock *NewBB = BasicBlock::Create(C, "NewBB", F); + NewBB->splice(NewBB->end(), &BB, B, BB.end()); + + // The trailing records should've been absorbed into NewBB. 
+ EXPECT_FALSE(BB.getTrailingDbgRecords()); + EXPECT_TRUE(NewBB->getTrailingDbgRecords()); + if (DbgMarker *Trailing = NewBB->getTrailingDbgRecords()) { + EXPECT_EQ(llvm::range_size(Trailing->getDbgRecordRange()), 1u); + // Drop the trailing records now, to prevent a cleanup assertion. + Trailing->eraseFromParent(); + NewBB->deleteTrailingDbgRecords(); + } +} + } // End anonymous namespace. From 42f18eedc2cf2d1f64fd5d78fda376adf39a9b3d Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Tue, 3 Sep 2024 04:52:47 -0700 Subject: [PATCH 08/29] [SLP]Fix PR107036: Check if the type of the user is sizable before requesting its size. Only some instructions should be considered as potentially reducing the size of the operands types, not all instructions should be considered. Fixes https://github.com/llvm/llvm-project/issues/107036 (cherry picked from commit f381cd069965dabfeb277f30a4e532d7fd498f6e) --- .../Transforms/Vectorize/SLPVectorizer.cpp | 5 +++ .../X86/minbw-user-non-sizable.ll | 31 +++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 llvm/test/Transforms/SLPVectorizer/X86/minbw-user-non-sizable.ll diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index cca9eeebaa53..2f3d6b27378a 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -15539,6 +15539,11 @@ void BoUpSLP::computeMinimumValueSizes() { const TreeEntry *UserTE = E.UserTreeIndices.back().UserTE; if (TE == UserTE || !TE) return false; + if (!isa(U) || + !isa(UserTE->getMainOp())) + return true; unsigned UserTESz = DL->getTypeSizeInBits( UserTE->Scalars.front()->getType()); auto It = MinBWs.find(TE); diff --git a/llvm/test/Transforms/SLPVectorizer/X86/minbw-user-non-sizable.ll b/llvm/test/Transforms/SLPVectorizer/X86/minbw-user-non-sizable.ll new file mode 100644 index 000000000000..7e7d4352e277 --- /dev/null +++ 
b/llvm/test/Transforms/SLPVectorizer/X86/minbw-user-non-sizable.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s -slp-threshold=-100 | FileCheck %s + +define void @test(ptr %i) { +; CHECK-LABEL: define void @test( +; CHECK-SAME: ptr [[I:%.*]]) { +; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: br label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x i32> [ [[TMP3:%.*]], %[[BB2]] ], [ zeroinitializer, %[[BB]] ] +; CHECK-NEXT: store <2 x i32> [[TMP0]], ptr [[I]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> , <2 x i32> +; CHECK-NEXT: [[TMP2:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i1> +; CHECK-NEXT: [[TMP3]] = select <2 x i1> [[TMP2]], <2 x i32> zeroinitializer, <2 x i32> zeroinitializer +; CHECK-NEXT: br label %[[BB2]] +; +bb: + %i1 = getelementptr i8, ptr %i, i64 4 + br label %bb2 + +bb2: + %i3 = phi i32 [ %i6, %bb2 ], [ 0, %bb ] + %i4 = phi i32 [ %i8, %bb2 ], [ 0, %bb ] + store i32 %i3, ptr %i + store i32 %i4, ptr %i1 + %i5 = trunc i32 0 to i1 + %i6 = select i1 %i5, i32 0, i32 0 + %i7 = trunc i32 %i4 to i1 + %i8 = select i1 %i7, i32 0, i32 0 + br label %bb2 +} From 5e1a55eaa0bb592dd04f1b8474b8f064aded7b2e Mon Sep 17 00:00:00 2001 From: Sander de Smalen Date: Thu, 5 Sep 2024 15:06:19 +0100 Subject: [PATCH 09/29] [AArch64] Disable SVE paired ld1/st1 for callee-saves. The functionality to make use of SVE's load/store pair instructions for the callee-saves is broken because the offsets used in the instructions are incorrect. This is addressed by #105518 but given the complexity of this code and the subtleties around calculating the right offsets, we favour disabling the behaviour altogether for LLVM 19. This fix is critical for any programs being compiled with `+sme2`. 
--- .../Target/AArch64/AArch64FrameLowering.cpp | 33 - llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll | 104 +- .../CodeGen/AArch64/sme2-intrinsics-ld1.ll | 1488 +++++++++++------ .../CodeGen/AArch64/sme2-intrinsics-ldnt1.ll | 1488 +++++++++++------ .../AArch64/sve-callee-save-restore-pairs.ll | 140 +- 5 files changed, 2036 insertions(+), 1217 deletions(-) diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index ba46ededc63a..87e057a468af 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -2931,16 +2931,6 @@ struct RegPairInfo { } // end anonymous namespace -unsigned findFreePredicateReg(BitVector &SavedRegs) { - for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) { - if (SavedRegs.test(PReg)) { - unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0; - return PNReg; - } - } - return AArch64::NoRegister; -} - static void computeCalleeSaveRegisterPairs( MachineFunction &MF, ArrayRef CSI, const TargetRegisterInfo *TRI, SmallVectorImpl &RegPairs, @@ -3645,7 +3635,6 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, unsigned ExtraCSSpill = 0; bool HasUnpairedGPR64 = false; - bool HasPairZReg = false; // Figure out which callee-saved registers to save/restore. for (unsigned i = 0; CSRegs[i]; ++i) { const unsigned Reg = CSRegs[i]; @@ -3699,28 +3688,6 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, !RegInfo->isReservedReg(MF, PairedReg)) ExtraCSSpill = PairedReg; } - // Check if there is a pair of ZRegs, so it can select PReg for spill/fill - HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) && - SavedRegs.test(CSRegs[i ^ 1])); - } - - if (HasPairZReg && (Subtarget.hasSVE2p1() || Subtarget.hasSME2())) { - AArch64FunctionInfo *AFI = MF.getInfo(); - // Find a suitable predicate register for the multi-vector spill/fill - // instructions. 
- unsigned PnReg = findFreePredicateReg(SavedRegs); - if (PnReg != AArch64::NoRegister) - AFI->setPredicateRegForFillSpill(PnReg); - // If no free callee-save has been found assign one. - if (!AFI->getPredicateRegForFillSpill() && - MF.getFunction().getCallingConv() == - CallingConv::AArch64_SVE_VectorCall) { - SavedRegs.set(AArch64::P8); - AFI->setPredicateRegForFillSpill(AArch64::PN8); - } - - assert(!RegInfo->isReservedReg(MF, AFI->getPredicateRegForFillSpill()) && - "Predicate cannot be a reserved register"); } if (MF.getFunction().getCallingConv() == CallingConv::Win64 && diff --git a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll index 6264ce0cf4ae..fa8f92cb0a2c 100644 --- a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll +++ b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll @@ -329,27 +329,34 @@ define void @vg_unwind_with_sve_args( %x) #0 { ; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: addvl sp, sp, #-18 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG -; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill -; CHECK-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #8, mul vl] // 32-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #12, mul vl] // 32-byte Folded Spill -; CHECK-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #16, mul vl] // 32-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #20, mul vl] // 32-byte Folded Spill -; CHECK-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill ; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: st1b { z10.b, z11.b }, pn8, 
[sp, #28, mul vl] // 32-byte Folded Spill ; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #32, mul vl] // 32-byte Folded Spill +; CHECK-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 
0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG @@ -371,16 +378,23 @@ define void @vg_unwind_with_sve_args( %x) #0 { ; CHECK-NEXT: .cfi_restore vg ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG -; CHECK-NEXT: ptrue pn8.b +; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #8, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #12, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #16, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #20, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #24, mul vl] // 
32-byte Folded Reload -; CHECK-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #28, mul vl] // 32-byte Folded Reload -; CHECK-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #32, mul vl] // 32-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload @@ -424,27 +438,34 @@ define void @vg_unwind_with_sve_args( %x) #0 { ; FP-CHECK-NEXT: .cfi_offset w30, -40 ; FP-CHECK-NEXT: .cfi_offset w29, -48 ; FP-CHECK-NEXT: addvl sp, sp, #-18 -; FP-CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: ptrue pn8.b ; FP-CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill -; FP-CHECK-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #8, mul vl] // 32-byte Folded Spill ; FP-CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #12, mul vl] // 32-byte Folded Spill -; FP-CHECK-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #16, mul vl] // 32-byte Folded Spill ; FP-CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #20, mul vl] // 32-byte Folded Spill -; FP-CHECK-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill ; FP-CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #28, mul vl] // 32-byte Folded Spill ; FP-CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; FP-CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte 
Folded Spill ; FP-CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; FP-CHECK-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #32, mul vl] // 32-byte Folded Spill +; FP-CHECK-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; FP-CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG ; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG ; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG @@ -464,16 +485,23 @@ define void @vg_unwind_with_sve_args( %x) #0 { ; FP-CHECK-NEXT: smstart sm ; FP-CHECK-NEXT: .cfi_restore vg ; FP-CHECK-NEXT: addvl sp, sp, #1 -; FP-CHECK-NEXT: ptrue pn8.b +; FP-CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; 
FP-CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; FP-CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; FP-CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #8, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #12, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #16, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #20, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #28, mul vl] // 32-byte Folded Reload -; FP-CHECK-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #32, mul vl] // 32-byte Folded Reload ; FP-CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; FP-CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; FP-CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload diff --git 
a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll index 29d3d68fc4c3..013d8a0512b1 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll @@ -55,31 +55,45 @@ define @ld1_x2_i8_z0_z8( %unused, @ld1_x2_i8_z0_z8( %unused, @ld1_x2_i8_z0_z8( %unused, @ld1_x2_i8_z0_z8_scalar( %unused, @ld1_x2_i8_z0_z8_scalar( %unused, @ld1_x2_i8_z0_z8_scalar( %unused, @ld1_x2_i16_z0_z8( %unused, @ld1_x2_i16_z0_z8( %unused, @ld1_x2_i16_z0_z8( %unused, @ld1_x2_i16_z0_z8_scalar( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul 
vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1h { z0.h, z8.h }, pn8/z, [x0, x1, lsl #1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr 
z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -477,14 +569,20 @@ define @ld1_x2_i16_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 
16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -497,15 +595,21 @@ define @ld1_x2_i16_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded 
Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -573,31 +677,45 @@ define @ld1_x2_i32_z0_z8( %unused, @ld1_x2_i32_z0_z8( %unused, @ld1_x2_i32_z0_z8( %unused, @ld1_x2_i32_z0_z8_scalar( %unused, < ; STRIDED-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1w { z0.s, z8.s }, pn8/z, [x0, 
x1, lsl #2] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr 
z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -736,14 +880,20 @@ define @ld1_x2_i32_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, 
[sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -756,15 +906,21 @@ define @ld1_x2_i32_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 
32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -832,31 +988,45 @@ define @ld1_x2_i64_z0_z8( %unused, @ld1_x2_i64_z0_z8( %unused, @ld1_x2_i64_z0_z8( %unused, @ld1_x2_i64_z0_z8_scalar( %unused, < ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 
16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1d { z0.d, z8.d }, pn8/z, [x0, x1, lsl #3] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, 
[sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -995,14 +1191,20 @@ define @ld1_x2_i64_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 
16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1015,15 +1217,21 @@ define @ld1_x2_i64_z0_z8_scalar( %unused, < ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul 
vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1093,32 +1301,46 @@ define @ld1_x4_i8_z0_z4_z8_z12( %unused, @ld1_x4_i8_z0_z4_z8_z12( %unused, @ld1_x4_i8_z0_z4_z8_z12( %unused, @ld1_x4_i8_z0_z4_z8_z12_scalar( %unu ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, 
#3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0, x1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 
16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1270,14 +1516,19 @@ define @ld1_x4_i8_z0_z4_z8_z12_scalar( %unu ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1294,15 +1545,20 @@ define @ld1_x4_i8_z0_z4_z8_z12_scalar( %unu ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr 
z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1376,32 +1632,46 @@ define @ld1_x4_i16_z0_z4_z8_z12( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1h { z0.h, z4.h, z8.h, z12.h 
}, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr 
z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1412,14 +1682,19 @@ define @ld1_x4_i16_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul 
vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1436,15 +1711,20 @@ define @ld1_x4_i16_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: 
ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1518,32 +1798,46 @@ define @ld1_x4_i16_z0_z4_z8_z12_scalar( %u ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, 
mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1h { z0.h, z4.h, z8.h, z12.h }, pn8/z, [x0, x1, lsl #1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload 
+; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1554,14 +1848,19 @@ define @ld1_x4_i16_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str 
z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1578,15 +1877,20 @@ define @ld1_x4_i16_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, 
mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1660,32 +1964,46 @@ define @ld1_x4_i32_z0_z4_z8_z12( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; 
STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 
16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1696,14 +2014,19 @@ define @ld1_x4_i32_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str 
z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1720,15 +2043,20 @@ define @ld1_x4_i32_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded 
Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1802,32 +2130,46 @@ define @ld1_x4_i32_z0_z4_z8_z12_scalar( %u ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte 
Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0, x1, lsl #2] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr 
z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1838,14 +2180,19 @@ define @ld1_x4_i32_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; 
CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1862,15 +2209,20 @@ define @ld1_x4_i32_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; 
CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1944,32 +2296,46 @@ define @ld1_x4_i64_z0_z4_z8_z12( %unused, < ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; 
STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1d { z0.d, z4.d, z8.d, z12.d }, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 
32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1980,14 +2346,19 @@ define @ld1_x4_i64_z0_z4_z8_z12( %unused, < ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -2004,15 +2375,20 @@ define @ld1_x4_i64_z0_z4_z8_z12( %unused, < ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr 
z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -2086,32 +2462,46 @@ define @ld1_x4_i64_z0_z4_z8_z12_scalar( %un ; STRIDED-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ld1d { z0.d, z4.d, z8.d, z12.d 
}, pn8/z, [x0, x1, lsl #3] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; 
STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -2122,14 +2512,19 @@ define @ld1_x4_i64_z0_z4_z8_z12_scalar( %un ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str 
z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -2146,15 +2541,20 @@ define @ld1_x4_i64_z0_z4_z8_z12_scalar( %un ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; 
CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll index 3d3748e10112..eff1260c947d 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll @@ -8,31 +8,45 @@ define @ldnt1_x2_i8_z0_z8( %unused, @ldnt1_x2_i8_z0_z8( %unused, @ldnt1_x2_i8_z0_z8( %unused, @ldnt1_x2_i8_z0_z8_scalar( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; 
STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1b { z0.b, z8.b }, pn8/z, [x0, x1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte 
Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -124,14 +164,20 @@ define @ldnt1_x2_i8_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; 
CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -144,15 +190,21 @@ define @ldnt1_x2_i8_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: 
ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -173,31 +225,45 @@ define @ldnt1_x2_i16_z0_z8( %unused, @ldnt1_x2_i16_z0_z8( %unused, @ldnt1_x2_i16_z0_z8( %unused, @ldnt1_x2_i16_z0_z8_scalar( %unused ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; 
STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1h { z0.h, z8.h }, pn8/z, [x0, x1, lsl #1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: 
ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -289,14 +381,20 @@ define @ldnt1_x2_i16_z0_z8_scalar( %unused ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -309,15 +407,21 @@ define @ldnt1_x2_i16_z0_z8_scalar( %unused ; CONTIGUOUS-NEXT: 
ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte 
Folded Reload @@ -338,31 +442,45 @@ define @ldnt1_x2_i32_z0_z8( %unused, @ldnt1_x2_i32_z0_z8( %unused, @ldnt1_x2_i32_z0_z8( %unused, @ldnt1_x2_i32_z0_z8_scalar( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] 
// 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1w { z0.s, z8.s }, pn8/z, [x0, x1, lsl #2] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: 
ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -454,14 +598,20 @@ define @ldnt1_x2_i32_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, 
#11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -474,15 +624,21 @@ define @ldnt1_x2_i32_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, 
[sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -503,31 +659,45 @@ define @ldnt1_x2_i64_z0_z8( %unused, @ldnt1_x2_i64_z0_z8( %unused, @ldnt1_x2_i64_z0_z8( %unused, @ldnt1_x2_i64_z0_z8_scalar( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul 
vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1d { z0.d, z8.d }, pn8/z, [x0, x1, lsl #3] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: mov z1.d, z8.d -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload ; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 
16-byte Folded Reload +; STRIDED-NEXT: mov z1.d, z8.d +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; STRIDED-NEXT: ret @@ -619,14 +815,20 @@ define @ldnt1_x2_i64_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-16 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul 
vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-2 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -639,15 +841,21 @@ define @ldnt1_x2_i64_z0_z8_scalar( %unused, ; CONTIGUOUS-NEXT: ldr z0, [sp] ; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #2 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte 
Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #16 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -668,32 +876,46 @@ define @ldnt1_x4_i8_z0_z4_z8_z12( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1b { z0.b, z4.b, z8.b, z12.b 
}, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr 
z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -704,14 +926,19 @@ define @ldnt1_x4_i8_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] 
// 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -728,15 +955,20 @@ define @ldnt1_x4_i8_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr 
x29, [sp], #16 // 8-byte Folded Reload @@ -760,32 +992,46 @@ define @ldnt1_x4_i8_z0_z4_z8_z12_scalar( %u ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul 
vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1b { z0.b, z4.b, z8.b, z12.b }, pn8/z, [x0, x1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; 
STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -796,14 +1042,19 @@ define @ldnt1_x4_i8_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, 
[sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -820,15 +1071,20 @@ define @ldnt1_x4_i8_z0_z4_z8_z12_scalar( %u ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul 
vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -853,32 +1109,46 @@ define @ldnt1_x4_i16_z0_z4_z8_z12( %unused ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: 
str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1h { z0.h, z4.h, z8.h, z12.h }, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded 
Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -889,14 +1159,19 @@ define @ldnt1_x4_i16_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul 
vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -913,15 +1188,20 @@ define @ldnt1_x4_i16_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; 
CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -945,32 +1225,46 @@ define @ldnt1_x4_i16_z0_z4_z8_z12_scalar( ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; 
STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1h { z0.h, z4.h, z8.h, z12.h }, pn8/z, [x0, x1, lsl #1] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, 
mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -981,14 +1275,19 @@ define @ldnt1_x4_i16_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: 
str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1005,15 +1304,20 @@ define @ldnt1_x4_i16_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, 
z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1038,32 +1342,46 @@ define @ldnt1_x4_i32_z0_z4_z8_z12( %unused ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 
16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, 
[sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1074,14 +1392,19 @@ define @ldnt1_x4_i32_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; 
CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1098,15 +1421,20 @@ define @ldnt1_x4_i32_z0_z4_z8_z12( %unused ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, 
mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1130,32 +1458,46 @@ define @ldnt1_x4_i32_z0_z4_z8_z12_scalar( ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str 
z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1w { z0.s, z4.s, z8.s, z12.s }, pn8/z, [x0, x1, lsl #2] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, 
z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1166,14 +1508,19 @@ define @ldnt1_x4_i32_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1190,15 +1537,20 @@ define @ldnt1_x4_i32_z0_z4_z8_z12_scalar( ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr 
z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1223,32 +1575,46 @@ define @ldnt1_x4_i64_z0_z4_z8_z12( %unused, ; STRIDED-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1d { z0.d, z4.d, z8.d, z12.d 
}, pn8/z, [x0] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr 
z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1259,14 +1625,19 @@ define @ldnt1_x4_i64_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul 
vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1283,15 +1654,20 @@ define @ldnt1_x4_i64_z0_z4_z8_z12( %unused, ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: 
ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1315,32 +1691,46 @@ define @ldnt1_x4_i64_z0_z4_z8_z12_scalar( % ; STRIDED-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; STRIDED-NEXT: addvl sp, sp, #-17 ; STRIDED-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #22, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #26, mul vl] // 32-byte Folded Spill -; STRIDED-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #30, mul vl] // 32-byte Folded Spill ; STRIDED-NEXT: mov p8.b, p0.b +; STRIDED-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z12, [sp, #12, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z11, [sp, #13, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z9, [sp, #15, 
mul vl] // 16-byte Folded Spill +; STRIDED-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; STRIDED-NEXT: ldnt1d { z0.d, z4.d, z8.d, z12.d }, pn8/z, [x0, x1, lsl #3] ; STRIDED-NEXT: //APP ; STRIDED-NEXT: nop ; STRIDED-NEXT: //NO_APP -; STRIDED-NEXT: ptrue pn8.b -; STRIDED-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #26, mul vl] // 32-byte Folded Reload +; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z2.d, z8.d ; STRIDED-NEXT: mov z3.d, z12.d -; STRIDED-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #22, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #30, mul vl] // 32-byte Folded Reload -; STRIDED-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload +; STRIDED-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z12, [sp, #12, mul vl] // 16-byte Folded 
Reload +; STRIDED-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload +; STRIDED-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload ; STRIDED-NEXT: mov z1.d, z4.d ; STRIDED-NEXT: addvl sp, sp, #17 ; STRIDED-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1351,14 +1741,19 @@ define @ldnt1_x4_i64_z0_z4_z8_z12_scalar( % ; CONTIGUOUS-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-15 ; CONTIGUOUS-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CONTIGUOUS-NEXT: ptrue pn8.b -; CONTIGUOUS-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #6, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #10, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #14, mul vl] // 32-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #18, mul vl] // 32-byte Folded Spill +; CONTIGUOUS-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z15, [sp, #9, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z14, [sp, #10, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z13, [sp, #11, mul vl] // 16-byte Folded Spill -; CONTIGUOUS-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill +; 
CONTIGUOUS-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CONTIGUOUS-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CONTIGUOUS-NEXT: addvl sp, sp, #-4 ; CONTIGUOUS-NEXT: mov p8.b, p0.b @@ -1375,15 +1770,20 @@ define @ldnt1_x4_i64_z0_z4_z8_z12_scalar( % ; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl] ; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl] ; CONTIGUOUS-NEXT: addvl sp, sp, #4 -; CONTIGUOUS-NEXT: ptrue pn8.b +; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload ; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #6, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #10, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #14, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #18, mul vl] // 32-byte Folded Reload -; CONTIGUOUS-NEXT: ld1b { z10.b, 
z11.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload ; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload ; CONTIGUOUS-NEXT: addvl sp, sp, #15 ; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll index 470c0dd45782..c0d5d9dfdbb0 100644 --- a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll +++ b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll @@ -88,27 +88,34 @@ define void @fbyte( %v) { ; PAIR: // %bb.0: ; PAIR-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-18 -; PAIR-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #8, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #12, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #16, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #20, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #28, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; PAIR-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: 
str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #32, mul vl] // 32-byte Folded Spill +; PAIR-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 @@ -121,16 +128,23 @@ define void @fbyte( %v) { ; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG ; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG ; PAIR-NEXT: bl my_func -; PAIR-NEXT: ptrue pn8.b +; PAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr 
z20, [sp, #5, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; PAIR-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; PAIR-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #8, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #12, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #16, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #20, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #28, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #32, mul vl] // 32-byte Folded Reload ; PAIR-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload @@ -230,27 +244,34 @@ define void @fhalf( %v) { ; PAIR: // %bb.0: ; PAIR-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-18 -; PAIR-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z22.b, z23.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z20.b, z21.b }, pn8, [sp, #8, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z18.b, z19.b }, pn8, [sp, #12, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z16.b, z17.b }, pn8, [sp, #16, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z14.b, z15.b }, pn8, [sp, #20, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: st1b { z12.b, z13.b }, pn8, [sp, #24, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z10.b, z11.b }, pn8, [sp, #28, mul vl] // 32-byte Folded Spill ; PAIR-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; PAIR-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #32, mul vl] // 32-byte Folded Spill +; PAIR-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded 
Spill +; PAIR-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 @@ -263,16 +284,23 @@ define void @fhalf( %v) { ; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG ; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG ; PAIR-NEXT: bl my_func -; PAIR-NEXT: ptrue pn8.b +; PAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded 
Reload +; PAIR-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; PAIR-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; PAIR-NEXT: ld1b { z22.b, z23.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z20.b, z21.b }, pn8/z, [sp, #8, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z18.b, z19.b }, pn8/z, [sp, #12, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z16.b, z17.b }, pn8/z, [sp, #16, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z14.b, z15.b }, pn8/z, [sp, #20, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z12.b, z13.b }, pn8/z, [sp, #24, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z10.b, z11.b }, pn8/z, [sp, #28, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #32, mul vl] // 32-byte Folded Reload ; PAIR-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload @@ -323,12 +351,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; PAIR: // %bb.0: ; PAIR-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-4 -; PAIR-NEXT: str p8, [sp, #5, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #4, mul vl] // 32-byte Folded Spill +; PAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG @@ -336,10 +363,9 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP -; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload -; PAIR-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #4, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ldr p8, [sp, #5, mul vl] // 2-byte Folded Reload +; PAIR-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload ; PAIR-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: addvl sp, sp, #4 @@ -381,11 +407,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; PAIR: // %bb.0: ; PAIR-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-4 -; PAIR-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: ptrue pn9.b ; PAIR-NEXT: str p10, [sp, #6, mul vl] // 2-byte Folded Spill +; PAIR-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: st1b { z8.b, z9.b }, pn9, [sp, #4, mul vl] // 32-byte Folded Spill +; PAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG @@ -393,10 +419,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP -; PAIR-NEXT: ptrue pn9.b ; PAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload ; PAIR-NEXT: ldr p10, [sp, #6, mul vl] // 2-byte Folded Reload -; PAIR-NEXT: ld1b { z8.b, z9.b }, pn9/z, [sp, #4, mul vl] // 32-byte Folded Reload ; PAIR-NEXT: ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload ; PAIR-NEXT: addvl sp, sp, #4 ; PAIR-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -429,20 +455,18 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; PAIR-LABEL: test_clobbers_z_regs: ; PAIR: // %bb.0: ; PAIR-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill -; PAIR-NEXT: addvl sp, sp, #-3 -; PAIR-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: ptrue pn8.b -; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; PAIR-NEXT: addvl sp, sp, #-2 +; PAIR-NEXT: str z9, [sp] // 16-byte Folded Spill +; PAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill +; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG ; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP -; PAIR-NEXT: ptrue pn8.b -; PAIR-NEXT: ld1b { z8.b, z9.b }, pn8/z, [sp, #2, mul vl] // 32-byte Folded Reload -; PAIR-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload -; PAIR-NEXT: addvl sp, sp, #3 +; PAIR-NEXT: ldr z9, [sp] // 16-byte Folded Reload +; PAIR-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload +; PAIR-NEXT: addvl sp, sp, #2 ; PAIR-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; PAIR-NEXT: ret call void asm sideeffect "", "~{z8},~{z9}"() From bb79e7f668456473e13985a8f135cc3a45340fb5 Mon Sep 17 00:00:00 2001 From: Nicolas van Kempen Date: Mon, 9 Sep 2024 07:12:46 -0400 Subject: [PATCH 10/29] [clang][analyzer] Fix #embed crash (#107764) Fix #107724. 
(cherry picked from commit d84d9559bdc7aeb4ce14c251f6a3490c66db8d3a) --- clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 5 +---- clang/test/Analysis/embed.c | 12 ++++++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) create mode 100644 clang/test/Analysis/embed.c diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index 62a240ecbc60..c11468a08ae5 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1928,6 +1928,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::CXXRewrittenBinaryOperatorClass: case Stmt::RequiresExprClass: case Expr::CXXParenListInitExprClass: + case Stmt::EmbedExprClass: // Fall through. // Cases we intentionally don't evaluate, since they don't need @@ -2430,10 +2431,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, Bldr.addNodes(Dst); break; } - - case Stmt::EmbedExprClass: - llvm::report_fatal_error("Support for EmbedExpr is not implemented."); - break; } } diff --git a/clang/test/Analysis/embed.c b/clang/test/Analysis/embed.c new file mode 100644 index 000000000000..32f6c1303257 --- /dev/null +++ b/clang/test/Analysis/embed.c @@ -0,0 +1,12 @@ +// RUN: %clang_analyze_cc1 -std=c23 -analyzer-checker=core,debug.ExprInspection -verify %s + +void clang_analyzer_dump_ptr(const unsigned char *ptr); +void clang_analyzer_dump(unsigned char val); + +int main() { + const unsigned char SelfBytes[] = { + #embed "embed.c" + }; + clang_analyzer_dump_ptr(SelfBytes); // expected-warning {{&Element{SelfBytes,0 S64b,unsigned char}}} + clang_analyzer_dump(SelfBytes[0]); // expected-warning {{Unknown}} FIXME: This should be the `/` character. +} From f64404e32187a6f45771e72e1b65e99be82acaba Mon Sep 17 00:00:00 2001 From: Rainer Orth Date: Sat, 3 Aug 2024 22:18:11 +0200 Subject: [PATCH 11/29] [builtins] Fix divtc3.c etc. 
compilation on Solaris/SPARC with gcc (#101662) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `compiler-rt/lib/builtins/divtc3.c` and `multc3.c` don't compile on Solaris/sparcv9 with `gcc -m32`: ``` FAILED: projects/compiler-rt/lib/builtins/CMakeFiles/clang_rt.builtins-sparc.dir/divtc3.c.o [...] compiler-rt/lib/builtins/divtc3.c: In function ‘__divtc3’: compiler-rt/lib/builtins/divtc3.c:22:18: error: implicit declaration of function ‘__compiler_rt_logbtf’ [-Wimplicit-function-declaration] 22 | fp_t __logbw = __compiler_rt_logbtf( | ^~~~~~~~~~~~~~~~~~~~ ``` and many more. It turns out that while the definition of `__divtc3` is guarded with `CRT_HAS_F128`, the `__compiler_rt_logbtf` and other declarations use `CRT_HAS_128BIT && CRT_HAS_F128` as guard. This only shows up with `gcc` since, as documented in Issue #41838, `clang` violates the SPARC psABI in not using 128-bit `long double`, so this code path isn't used. Fixed by changing the guards to match. Tested on `sparcv9-sun-solaris2.11`. 
(cherry picked from commit 63a7786111c501920afc4cc27a4633f76cdaf803) --- compiler-rt/lib/builtins/divtc3.c | 2 +- compiler-rt/lib/builtins/multc3.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/compiler-rt/lib/builtins/divtc3.c b/compiler-rt/lib/builtins/divtc3.c index 099de5802daf..c393de815337 100644 --- a/compiler-rt/lib/builtins/divtc3.c +++ b/compiler-rt/lib/builtins/divtc3.c @@ -13,7 +13,7 @@ #define QUAD_PRECISION #include "fp_lib.h" -#if defined(CRT_HAS_F128) +#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) // Returns: the quotient of (a + ib) / (c + id) diff --git a/compiler-rt/lib/builtins/multc3.c b/compiler-rt/lib/builtins/multc3.c index 61a3f45e4727..a89832f0e883 100644 --- a/compiler-rt/lib/builtins/multc3.c +++ b/compiler-rt/lib/builtins/multc3.c @@ -15,7 +15,7 @@ #include "int_lib.h" #include "int_math.h" -#if defined(CRT_HAS_F128) +#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) // Returns: the product of a + ib and c + id From 2651d09ec9c4d87d09ae72d8bf42fab566fb02d0 Mon Sep 17 00:00:00 2001 From: Hua Tian Date: Thu, 15 Aug 2024 19:03:27 +0800 Subject: [PATCH 12/29] [llvm][CodeGen] Resolve issues when updating live intervals in window scheduler (#101945) Corrupted live interval information can cause window scheduling to crash in some cases. By adding the missing MBB's live interval information in the ModuloScheduleExpander, the information can be correctly analyzed in the window scheduler. 
(cherry picked from commit 43ba1097ee747b4ec5e757762ed0c9df6255a292) --- llvm/lib/CodeGen/ModuloSchedule.cpp | 3 + .../CodeGen/Hexagon/swp-ws-live-intervals.mir | 217 ++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 llvm/test/CodeGen/Hexagon/swp-ws-live-intervals.mir diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp index 0f29ebe3ee79..b1a2bfaf7895 100644 --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -130,6 +130,7 @@ void ModuloScheduleExpander::generatePipelinedLoop() { // Generate the prolog instructions that set up the pipeline. generateProlog(MaxStageCount, KernelBB, VRMap, PrologBBs); MF.insert(BB->getIterator(), KernelBB); + LIS.insertMBBInMaps(KernelBB); // Rearrange the instructions to generate the new, pipelined loop, // and update register names as needed. @@ -210,6 +211,7 @@ void ModuloScheduleExpander::generateProlog(unsigned LastStage, NewBB->transferSuccessors(PredBB); PredBB->addSuccessor(NewBB); PredBB = NewBB; + LIS.insertMBBInMaps(NewBB); // Generate instructions for each appropriate stage. Process instructions // in original program order. 
@@ -283,6 +285,7 @@ void ModuloScheduleExpander::generateEpilog( PredBB->replaceSuccessor(LoopExitBB, NewBB); NewBB->addSuccessor(LoopExitBB); + LIS.insertMBBInMaps(NewBB); if (EpilogStart == LoopExitBB) EpilogStart = NewBB; diff --git a/llvm/test/CodeGen/Hexagon/swp-ws-live-intervals.mir b/llvm/test/CodeGen/Hexagon/swp-ws-live-intervals.mir new file mode 100644 index 000000000000..7fa3cdf62d09 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/swp-ws-live-intervals.mir @@ -0,0 +1,217 @@ +# REQUIRES: asserts +# +# RUN: llc --march=hexagon %s -run-pass=pipeliner -debug-only=pipeliner \ +# RUN: -window-sched=force -filetype=null -window-search-num=100 \ +# RUN: -window-search-ratio=100 -window-diff-limit=0 -verify-machineinstrs \ +# RUN: 2>&1 | FileCheck %s + +# The bug was reported at https://github.com/llvm/llvm-project/pull/99454. +# It is caused by the corruption of live intervals in certain scenarios. +# +# We check the newly generated MBBs after successful scheduling here. +# CHECK: Best window offset is {{[0-9]+}} and Best II is {{[0-9]+}}. +# CHECK: prolog: +# CHECK: bb.5: +# CHECK: New block +# CHECK: bb.6: +# CHECK: epilog: +# CHECK: bb.7: +# CHECK: Best window offset is {{[0-9]+}} and Best II is {{[0-9]+}}. 
+# CHECK: prolog: +# CHECK: bb.8: +# CHECK: New block +# CHECK: bb.9: +# CHECK: epilog: +# CHECK: bb.10: + +--- | + target triple = "hexagon" + + @_dp_ctrl_calc_tu_temp2_fp = global i64 0 + @_dp_ctrl_calc_tu_temp1_fp = global i32 0 + @dp_panel_update_tu_timings___trans_tmp_5 = global i64 0 + @_dp_ctrl_calc_tu___trans_tmp_8 = global i64 0 + + declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) + declare i8 @div64_u64_rem(i32, ptr) + declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) + + define void @dp_ctrl_calc_tu_parameters() { + if.end.i: + %rem.i11.i = alloca i64, align 8 + %rem.i.i = alloca i64, align 8 + call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %rem.i11.i) + %call.i.i = call i8 @div64_u64_rem(i32 0, ptr nonnull %rem.i11.i) + %conv1.i.i = zext i8 %call.i.i to i64 + %rem.promoted.i.i = load i64, ptr %rem.i11.i, align 8 + br label %do.body.i.i + + do.body.i.i: + %lsr.iv1 = phi i32 [ %lsr.iv.next2, %do.body.i.i ], [ -32, %if.end.i ] + %sub9.i.i = phi i64 [ %rem.promoted.i.i, %if.end.i ], [ %sub8.i.i.7, %do.body.i.i ] + %res_abs.0.i.i = phi i64 [ %conv1.i.i, %if.end.i ], [ %res_abs.1.i.i.7, %do.body.i.i ] + %cmp.not.i.i = icmp ne i64 %sub9.i.i, 0 + %sub.i.neg.i = sext i1 %cmp.not.i.i to i64 + %sub8.i.i = add i64 %sub9.i.i, %sub.i.neg.i + %0 = shl i64 %res_abs.0.i.i, 2 + %1 = select i1 %cmp.not.i.i, i64 2, i64 0 + %shl.i.i.5 = or disjoint i64 %0, %1 + %cmp.not.i.i.5 = icmp ne i64 %sub8.i.i, 0 + %sub.i.neg.i.5 = sext i1 %cmp.not.i.i.5 to i64 + %sub8.i.i.5 = add i64 %sub8.i.i, %sub.i.neg.i.5 + %or.i.i.5 = zext i1 %cmp.not.i.i.5 to i64 + %res_abs.1.i.i.5 = or disjoint i64 %shl.i.i.5, %or.i.i.5 + %cmp.not.i.i.6 = icmp ne i64 %sub8.i.i.5, 0 + %sub.i.neg.i.6 = sext i1 %cmp.not.i.i.6 to i64 + %sub8.i.i.6 = add i64 %sub8.i.i.5, %sub.i.neg.i.6 + %2 = shl i64 %res_abs.1.i.i.5, 2 + %3 = select i1 %cmp.not.i.i.6, i64 2, i64 0 + %shl.i.i.7 = or disjoint i64 %2, %3 + %cmp.not.i.i.7 = icmp ne i64 %sub8.i.i.6, 0 + %sub.i.neg.i.7 = sext i1 
%cmp.not.i.i.7 to i64 + %sub8.i.i.7 = add i64 %sub8.i.i.6, %sub.i.neg.i.7 + %or.i.i.7 = zext i1 %cmp.not.i.i.7 to i64 + %res_abs.1.i.i.7 = or disjoint i64 %shl.i.i.7, %or.i.i.7 + %lsr.iv.next2 = add nsw i32 %lsr.iv1, 8 + %tobool.not.i.i.7 = icmp eq i32 %lsr.iv.next2, 0 + br i1 %tobool.not.i.i.7, label %fec_check.i, label %do.body.i.i + + fec_check.i: + call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %rem.i11.i) + store i64 %res_abs.1.i.i.7, ptr @_dp_ctrl_calc_tu_temp2_fp, align 8 + call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %rem.i11.i) + %call.i12.i = call i8 @div64_u64_rem(i32 0, ptr nonnull %rem.i11.i) + %conv1.i13.i = zext i8 %call.i12.i to i64 + %rem.promoted.i14.i = load i64, ptr %rem.i11.i, align 8 + br label %do.body.i15.i + + do.body.i15.i: + %lsr.iv = phi i32 [ %lsr.iv.next, %do.body.i15.i ], [ -32, %fec_check.i ] + %sub9.i16.i = phi i64 [ %rem.promoted.i14.i, %fec_check.i ], [ %sub8.i22.i.7, %do.body.i15.i ] + %res_abs.0.i17.i = phi i64 [ %conv1.i13.i, %fec_check.i ], [ %res_abs.1.i24.i.7, %do.body.i15.i ] + %cmp.not.i20.i = icmp ugt i64 %sub9.i16.i, 999 + %sub.i21.neg.i = select i1 %cmp.not.i20.i, i64 -1000, i64 0 + %sub8.i22.i = add i64 %sub.i21.neg.i, %sub9.i16.i + %4 = shl i64 %res_abs.0.i17.i, 2 + %5 = select i1 %cmp.not.i20.i, i64 2, i64 0 + %shl.i19.i.7 = or disjoint i64 %4, %5 + %cmp.not.i20.i.7 = icmp ugt i64 %sub8.i22.i, 999 + %sub.i21.neg.i.7 = select i1 %cmp.not.i20.i.7, i64 -1000, i64 0 + %sub8.i22.i.7 = add i64 %sub.i21.neg.i.7, %sub8.i22.i + %or.i23.i.7 = zext i1 %cmp.not.i20.i.7 to i64 + %res_abs.1.i24.i.7 = or disjoint i64 %shl.i19.i.7, %or.i23.i.7 + %lsr.iv.next = add nsw i32 %lsr.iv, 8 + %tobool.not.i26.i.7 = icmp eq i32 %lsr.iv.next, 0 + br i1 %tobool.not.i26.i.7, label %_dp_ctrl_calc_tu.exit, label %do.body.i15.i + + _dp_ctrl_calc_tu.exit: + call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %rem.i11.i) + %conv.i = trunc i64 %res_abs.1.i24.i.7 to i32 + store i32 %conv.i, ptr @_dp_ctrl_calc_tu_temp1_fp, align 4 + %conv5.i 
= and i64 %res_abs.1.i24.i.7, 4294967295 + store i64 %conv5.i, ptr @dp_panel_update_tu_timings___trans_tmp_5, align 8 + store i64 %res_abs.1.i.i.7, ptr @_dp_ctrl_calc_tu___trans_tmp_8, align 8 + ret void + } + +... +--- +name: dp_ctrl_calc_tu_parameters +tracksRegLiveness: true +stack: + - { id: 0, name: rem.i11.i, type: default, offset: 0, size: 8, alignment: 8} +body: | + bb.0: + successors: %bb.1(0x80000000) + + %0:intregs = A2_tfrsi 0 + %1:intregs = PS_fi %stack.0.rem.i11.i, 0 + %2:intregs = A2_tfrsi 0 + %3:doubleregs = A4_combineir 0, %2 + %4:doubleregs = L2_loadrd_io %stack.0.rem.i11.i, 0 + %5:doubleregs = A2_tfrpi 0 + J2_loop0i %bb.1, 4, implicit-def $lc0, implicit-def $sa0, implicit-def $usr + + bb.1 (machine-block-address-taken): + successors: %bb.2(0x04000000), %bb.1(0x7c000000) + + %6:doubleregs = PHI %4, %bb.0, %7, %bb.1 + %8:doubleregs = PHI %3, %bb.0, %9, %bb.1 + %10:predregs = C2_cmpeqp %6, %5 + %11:intregs = C2_muxii %10, 0, -1 + %12:doubleregs = A2_addsp %11, %6 + %13:doubleregs = S2_asl_i_p %8, 2 + %14:intregs = S2_setbit_i %13.isub_lo, 1 + %15:intregs = C2_mux %10, %13.isub_lo, %14 + %16:predregs = C2_cmpeqp %12, %5 + %17:intregs = C2_muxii %16, 0, -1 + %18:doubleregs = A2_addsp %17, %12 + %19:intregs = S2_setbit_i %15, 0 + %20:intregs = C2_mux %16, %15, %19 + %21:predregs = C2_cmpeqp %18, %5 + %22:intregs = C2_muxii %21, 0, -1 + %23:doubleregs = A2_addsp %22, %18 + %24:intregs = S2_asl_i_r %20, 2 + %25:intregs = S2_extractu %8.isub_lo, 2, 28 + %26:intregs = S2_asl_i_r_or %25, %13.isub_hi, 2 + %27:intregs = S2_setbit_i %24, 1 + %28:intregs = C2_mux %21, %24, %27 + %29:predregs = C2_cmpeqp %23, %5 + %30:intregs = C2_muxii %29, 0, -1 + %7:doubleregs = A2_addsp %30, %23 + %31:intregs = S2_setbit_i %28, 0 + %32:intregs = C2_mux %29, %28, %31 + %9:doubleregs = REG_SEQUENCE %26, %subreg.isub_hi, %32, %subreg.isub_lo + ENDLOOP0 %bb.1, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0 + J2_jump %bb.2, implicit-def dead $pc + + bb.2: + 
successors: %bb.3(0x80000000) + + S2_storerdgp @_dp_ctrl_calc_tu_temp2_fp, %9, implicit $gp + %33:intregs = A2_tfrsi 0 + %34:intregs = PS_fi %stack.0.rem.i11.i, 0 + %35:intregs = A2_tfrsi 0 + %36:doubleregs = L2_loadrd_io %stack.0.rem.i11.i, 0 + %37:doubleregs = A2_tfrpi 124 + %38:intregs = A2_tfrsi -1000 + %39:intregs = A2_tfrsi -1 + J2_loop0i %bb.3, 4, implicit-def $lc0, implicit-def $sa0, implicit-def $usr + + bb.3 (machine-block-address-taken): + successors: %bb.4(0x04000000), %bb.3(0x7c000000) + + %40:doubleregs = PHI %36, %bb.2, %41, %bb.3 + %42:intregs = PHI %35, %bb.2, %43, %bb.3 + %44:intregs = PHI %33, %bb.2, %45, %bb.3 + %46:doubleregs = S2_lsr_i_p %40, 3 + %47:predregs = C2_cmpgtup %46, %37 + %48:intregs = C2_mux %47, %38, %33 + %49:intregs = C2_mux %47, %39, %33 + %50:doubleregs = REG_SEQUENCE %49, %subreg.isub_hi, %48, %subreg.isub_lo + %51:doubleregs = A2_addp %50, %40 + %52:intregs = S2_asl_i_r %42, 2 + %53:intregs = S2_extractu %42, 2, 30 + %45:intregs = S2_asl_i_r_or %53, %44, 2 + %54:intregs = S2_setbit_i %52, 1 + %55:intregs = C2_mux %47, %54, %52 + %56:doubleregs = S2_lsr_i_p %51, 3 + %57:predregs = C2_cmpgtup %56, %37 + %58:intregs = C2_mux %57, %38, %33 + %59:intregs = C2_mux %57, %39, %33 + %60:doubleregs = REG_SEQUENCE %59, %subreg.isub_hi, %58, %subreg.isub_lo + %41:doubleregs = A2_addp %60, %51 + %61:intregs = S2_setbit_i %55, 0 + %43:intregs = C2_mux %57, %61, %55 + ENDLOOP0 %bb.3, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0 + J2_jump %bb.4, implicit-def dead $pc + + bb.4: + S2_storerigp @_dp_ctrl_calc_tu_temp1_fp, %43, implicit $gp + %62:intregs = A2_tfrsi 0 + %63:doubleregs = REG_SEQUENCE %43, %subreg.isub_lo, %62, %subreg.isub_hi + S2_storerdgp @dp_panel_update_tu_timings___trans_tmp_5, %63, implicit $gp + S2_storerdgp @_dp_ctrl_calc_tu___trans_tmp_8, %9, implicit $gp + PS_jmpret $r31, implicit-def dead $pc + +... 
From 327ca6c02f0dbf13dd6f039d30d320a7ba1456b8 Mon Sep 17 00:00:00 2001 From: Owen Pan Date: Thu, 5 Sep 2024 23:59:11 -0700 Subject: [PATCH 13/29] [clang-format] Correctly annotate braces in macro definition (#107352) This reverts commit 2d90e8f7402b0a8114978b6f014cfe76c96c94a1 and backports commit 616a8ce6203d8c7569266bfaf163e74df1f440ad. --- clang/lib/Format/UnwrappedLineParser.cpp | 6 ++++-- clang/unittests/Format/TokenAnnotatorTest.cpp | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 60e65aaa83e9..7813d86ff0ea 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -570,7 +570,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in, Keywords.kw_as)); ProbablyBracedList = - ProbablyBracedList || (IsCpp && NextTok->is(tok::l_paren)); + ProbablyBracedList || (IsCpp && (PrevTok->Tok.isLiteral() || + NextTok->is(tok::l_paren))); // If there is a comma, semicolon or right paren after the closing // brace, we assume this is a braced initializer list. @@ -609,8 +610,9 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { ProbablyBracedList = NextTok->isNot(tok::l_square); } - // Cpp macro definition body containing nonempty braced list or block: + // Cpp macro definition body that is a nonempty braced list or block: if (IsCpp && Line->InMacroBody && PrevTok != FormatTok && + !FormatTok->Previous && NextTok->is(tok::eof) && // A statement can end with only `;` (simple statement), a block // closing brace (compound statement), or `:` (label statement). // If PrevTok is a block opening brace, Tok ends an empty block. 
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index db580d700588..dd58fbc70cb9 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -3219,6 +3219,21 @@ TEST_F(TokenAnnotatorTest, BraceKind) { EXPECT_TOKEN(Tokens[11], tok::r_brace, TT_StructRBrace); EXPECT_BRACE_KIND(Tokens[11], BK_Block); + Tokens = annotate("#define MACRO \\\n" + " struct hash { \\\n" + " void f() { return; } \\\n" + " };"); + ASSERT_EQ(Tokens.size(), 20u) << Tokens; + EXPECT_TOKEN(Tokens[8], tok::l_brace, TT_StructLBrace); + EXPECT_BRACE_KIND(Tokens[8], BK_Block); + EXPECT_TOKEN(Tokens[10], tok::identifier, TT_FunctionDeclarationName); + EXPECT_TOKEN(Tokens[11], tok::l_paren, TT_FunctionDeclarationLParen); + EXPECT_TOKEN(Tokens[13], tok::l_brace, TT_FunctionLBrace); + EXPECT_BRACE_KIND(Tokens[13], BK_Block); + EXPECT_BRACE_KIND(Tokens[16], BK_Block); + EXPECT_TOKEN(Tokens[17], tok::r_brace, TT_StructRBrace); + EXPECT_BRACE_KIND(Tokens[17], BK_Block); + Tokens = annotate("#define MEMBER(NAME) NAME{\"\"}"); ASSERT_EQ(Tokens.size(), 11u) << Tokens; EXPECT_BRACE_KIND(Tokens[7], BK_BracedInit); From 8290ce0998788b6a575ed7b4988b093f48c25b3d Mon Sep 17 00:00:00 2001 From: cor3ntin Date: Tue, 3 Sep 2024 20:36:15 +0200 Subject: [PATCH 14/29] [Clang] Fix handling of placeholder variables name in init captures (#107055) We were incorrectly not deduplicating results when looking up `_` which, for a lambda init capture, would result in an ambiguous lookup. The same bug caused some diagnostic notes to be emitted twice. 
Fixes #107024 --- clang/docs/ReleaseNotes.rst | 1 + clang/lib/Sema/SemaLambda.cpp | 1 - clang/lib/Sema/SemaLookup.cpp | 2 +- clang/test/SemaCXX/cxx2c-placeholder-vars.cpp | 6 ++++-- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 53d819c6c445..8c7a6ba70acd 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -1122,6 +1122,7 @@ Bug Fixes to C++ Support - Fixed a crash-on-invalid bug involving extraneous template parameter with concept substitution. (#GH73885) - Fixed assertion failure by skipping the analysis of an invalid field declaration. (#GH99868) - Fix an issue with dependent source location expressions (#GH106428), (#GH81155), (#GH80210), (#GH85373) +- Fix handling of ``_`` as the name of a lambda's init capture variable. (#GH107024) Bug Fixes to AST Handling diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp index 601077e9f333..809b94bb7412 100644 --- a/clang/lib/Sema/SemaLambda.cpp +++ b/clang/lib/Sema/SemaLambda.cpp @@ -1318,7 +1318,6 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro, if (C->Init.isUsable()) { addInitCapture(LSI, cast(Var), C->Kind == LCK_ByRef); - PushOnScopeChains(Var, CurScope, false); } else { TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef : TryCapture_ExplicitByVal; diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp index 7a6a64529f52..d3d4bf27ae72 100644 --- a/clang/lib/Sema/SemaLookup.cpp +++ b/clang/lib/Sema/SemaLookup.cpp @@ -570,7 +570,7 @@ void LookupResult::resolveKind() { // For non-type declarations, check for a prior lookup result naming this // canonical declaration. - if (!D->isPlaceholderVar(getSema().getLangOpts()) && !ExistingI) { + if (!ExistingI) { auto UniqueResult = Unique.insert(std::make_pair(D, I)); if (!UniqueResult.second) { // We've seen this entity before. 
diff --git a/clang/test/SemaCXX/cxx2c-placeholder-vars.cpp b/clang/test/SemaCXX/cxx2c-placeholder-vars.cpp index 5cf66b48784e..29ca3b5ef3df 100644 --- a/clang/test/SemaCXX/cxx2c-placeholder-vars.cpp +++ b/clang/test/SemaCXX/cxx2c-placeholder-vars.cpp @@ -50,14 +50,16 @@ void f() { void lambda() { (void)[_ = 0, _ = 1] { // expected-warning {{placeholder variables are incompatible with C++ standards before C++2c}} \ - // expected-note 4{{placeholder declared here}} + // expected-note 2{{placeholder declared here}} (void)_++; // expected-error {{ambiguous reference to placeholder '_', which is defined multiple times}} }; { int _ = 12; - (void)[_ = 0]{}; // no warning (different scope) + (void)[_ = 0]{ return _;}; // no warning (different scope) } + + auto GH107024 = [_ = 42]() { return _; }(); } namespace global_var { From 32a8b56bbf0a3c7678d44ba690427915446a9a72 Mon Sep 17 00:00:00 2001 From: Tom Stellard Date: Thu, 12 Sep 2024 09:50:57 -0700 Subject: [PATCH 15/29] workflows/release-binaries: Fix automatic upload (#107315) (cherry picked from commit ab96409180aaad5417030f06a386253722a99d71) --- .github/workflows/release-binaries.yml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 509016e5b89c..fcd371d49e6c 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -450,11 +450,22 @@ jobs: name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl + - name: Checkout Release Scripts + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + sparse-checkout: | + llvm/utils/release/github-upload-release.py + llvm/utils/git/requirements.txt + sparse-checkout-cone-mode: false + + - name: Install Python Requirements + run: | + pip install --require-hashes -r ./llvm/utils/git/requirements.txt + - name: Upload 
Release shell: bash run: | - sudo apt install python3-github - ./llvm-project/llvm/utils/release/github-upload-release.py \ + ./llvm/utils/release/github-upload-release.py \ --token ${{ github.token }} \ --release ${{ needs.prepare.outputs.release-version }} \ upload \ From 373180b440d04dc3cc0f6111b06684d18779d7c8 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Thu, 15 Aug 2024 07:21:10 -0700 Subject: [PATCH 16/29] [SLP]Fix PR104422: Wrong value truncation The minbitwidth restrictions can be skipped only for immediate reduced values, for other nodes still need to check if external users allow bitwidth reduction. Fixes https://github.com/llvm/llvm-project/issues/104422 (cherry picked from commit 56140a8258a3498cfcd9f0f05c182457d43cbfd2) --- .../Transforms/Vectorize/SLPVectorizer.cpp | 3 +- .../X86/operand-is-reduced-val.ll | 49 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 llvm/test/Transforms/SLPVectorizer/X86/operand-is-reduced-val.ll diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 2f3d6b27378a..ab2b96cdc42d 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -15211,7 +15211,8 @@ bool BoUpSLP::collectValuesToDemote( if (any_of(E.Scalars, [&](Value *V) { return !all_of(V->users(), [=](User *U) { return getTreeEntry(U) || - (UserIgnoreList && UserIgnoreList->contains(U)) || + (E.Idx == 0 && UserIgnoreList && + UserIgnoreList->contains(U)) || (!isa(U) && U->getType()->isSized() && !U->getType()->isScalableTy() && DL->getTypeSizeInBits(U->getType()) <= BitWidth); diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operand-is-reduced-val.ll b/llvm/test/Transforms/SLPVectorizer/X86/operand-is-reduced-val.ll new file mode 100644 index 000000000000..5fcac3fbf3ba --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/operand-is-reduced-val.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux < %s -slp-threshold=-10 | FileCheck %s + +define i64 @src(i32 %a) { +; CHECK-LABEL: define i64 @src( +; CHECK-SAME: i32 [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP17:%.*]] = sext i32 [[A]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64> +; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i64> [[TMP3]], +; CHECK-NEXT: [[TMP6:%.*]] = and <4 x i64> [[TMP4]], +; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP4]]) +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> poison, i64 [[TMP16]], i32 0 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> [[TMP8]], i64 [[TMP18]], i32 1 +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> , i64 [[TMP17]], i32 0 +; CHECK-NEXT: [[TMP11:%.*]] = add <2 x i64> [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i64> [[TMP11]], i32 0 +; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i64> [[TMP11]], i32 1 +; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP12]], [[TMP13]] +; CHECK-NEXT: ret i64 [[TMP21]] +; +entry: + %0 = sext i32 %a to i64 + %1 = add nsw i64 %0, 4294967297 + %2 = sext i32 %a to i64 + %3 = add nsw i64 %2, 4294967297 + %4 = add i64 %3, %1 + %5 = and i64 %3, 1 + %6 = add i64 %4, %5 + %7 = sext i32 %a to i64 + %8 = add nsw i64 %7, 4294967297 + %9 = add i64 %8, %6 + %10 = and i64 %8, 1 + %11 = add i64 %9, %10 + %12 = sext i32 %a to i64 + %13 = add nsw i64 %12, 4294967297 + %14 = add i64 %13, %11 + %15 = and i64 %13, 1 + %16 = add i64 %14, %15 + %17 = sext i32 %a to i64 + %18 = add nsw i64 %17, 4294967297 + %19 = add i64 %18, %16 + 
%20 = and i64 %18, 1 + %21 = add i64 %19, %20 + ret i64 %21 +} From 93998aff7662d9b3f94d9627179dffe342e2b399 Mon Sep 17 00:00:00 2001 From: Jay Foad Date: Tue, 27 Aug 2024 17:09:40 +0100 Subject: [PATCH 17/29] [AMDGPU] Fix sign confusion in performMulLoHiCombine (#105831) SMUL_LOHI and UMUL_LOHI are different operations because the high part of the result is different, so it is not OK to optimize the signed version to MUL_U24/MULHI_U24 or the unsigned version to MUL_I24/MULHI_I24. --- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 30 +++--- llvm/test/CodeGen/AMDGPU/mul_int24.ll | 98 +++++++++++++++++++ 2 files changed, 116 insertions(+), 12 deletions(-) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index 39ae7c96cf77..a71c9453d968 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -4349,6 +4349,7 @@ AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N, SelectionDAG &DAG = DCI.DAG; SDLoc DL(N); + bool Signed = N->getOpcode() == ISD::SMUL_LOHI; SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -4363,20 +4364,25 @@ AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N, // Try to use two fast 24-bit multiplies (one for each half of the result) // instead of one slow extending multiply. 
- unsigned LoOpcode, HiOpcode; - if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { - N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); - N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); - LoOpcode = AMDGPUISD::MUL_U24; - HiOpcode = AMDGPUISD::MULHI_U24; - } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { - N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); - N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); - LoOpcode = AMDGPUISD::MUL_I24; - HiOpcode = AMDGPUISD::MULHI_I24; + unsigned LoOpcode = 0; + unsigned HiOpcode = 0; + if (Signed) { + if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { + N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); + N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); + LoOpcode = AMDGPUISD::MUL_I24; + HiOpcode = AMDGPUISD::MULHI_I24; + } } else { - return SDValue(); + if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { + N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); + N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); + LoOpcode = AMDGPUISD::MUL_U24; + HiOpcode = AMDGPUISD::MULHI_U24; + } } + if (!LoOpcode) + return SDValue(); SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1); SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1); diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll index be77a10380c4..8f4c48fae6fb 100644 --- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll +++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll @@ -813,4 +813,102 @@ bb7: ret void } + +define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { +; SI-LABEL: test_umul_i24: +; SI: ; %bb.0: +; SI-NEXT: s_load_dword s1, s[2:3], 0xb +; SI-NEXT: v_mov_b32_e32 v0, 0xff803fe1 +; SI-NEXT: s_mov_b32 s0, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshr_b32 s1, s1, 9 +; SI-NEXT: v_mul_hi_u32 v0, s1, v0 +; SI-NEXT: s_mul_i32 s1, s1, 0xff803fe1 +; SI-NEXT: v_alignbit_b32 v0, v0, s1, 1 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s1, s0 +; SI-NEXT: buffer_store_dword v0, 
off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: test_umul_i24: +; VI: ; %bb.0: +; VI-NEXT: s_load_dword s0, s[2:3], 0x2c +; VI-NEXT: v_mov_b32_e32 v0, 0xff803fe1 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshr_b32 s0, s0, 9 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s0, v0, 0 +; VI-NEXT: s_mov_b32 s0, 0 +; VI-NEXT: s_mov_b32 s1, s0 +; VI-NEXT: v_alignbit_b32 v0, v1, v0, 1 +; VI-NEXT: s_nop 1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: test_umul_i24: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s1, s[2:3], 0x2c +; GFX9-NEXT: s_mov_b32 s0, 0 +; GFX9-NEXT: s_mov_b32 s3, 0xf000 +; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshr_b32 s1, s1, 9 +; GFX9-NEXT: s_mul_hi_u32 s4, s1, 0xff803fe1 +; GFX9-NEXT: s_mul_i32 s1, s1, 0xff803fe1 +; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 1 +; GFX9-NEXT: s_mov_b32 s1, s0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 +; GFX9-NEXT: s_endpgm +; +; EG-LABEL: test_umul_i24: +; EG: ; %bb.0: +; EG-NEXT: ALU 8, @4, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1 +; EG-NEXT: CF_END +; EG-NEXT: PAD +; EG-NEXT: ALU clause starting at 4: +; EG-NEXT: LSHR * T0.W, KC0[2].Z, literal.x, +; EG-NEXT: 9(1.261169e-44), 0(0.000000e+00) +; EG-NEXT: MULHI * T0.X, PV.W, literal.x, +; EG-NEXT: -8372255(nan), 0(0.000000e+00) +; EG-NEXT: MULLO_INT * T0.Y, T0.W, literal.x, +; EG-NEXT: -8372255(nan), 0(0.000000e+00) +; EG-NEXT: BIT_ALIGN_INT T0.X, T0.X, PS, 1, +; EG-NEXT: MOV * T1.X, literal.x, +; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00) +; +; CM-LABEL: test_umul_i24: +; CM: ; %bb.0: +; CM-NEXT: ALU 14, @4, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X +; CM-NEXT: CF_END +; CM-NEXT: PAD +; CM-NEXT: ALU clause starting at 4: +; CM-NEXT: LSHR * T0.W, KC0[2].Z, literal.x, +; CM-NEXT: 9(1.261169e-44), 0(0.000000e+00) +; CM-NEXT: 
MULHI T0.X, T0.W, literal.x, +; CM-NEXT: MULHI T0.Y (MASKED), T0.W, literal.x, +; CM-NEXT: MULHI T0.Z (MASKED), T0.W, literal.x, +; CM-NEXT: MULHI * T0.W (MASKED), T0.W, literal.x, +; CM-NEXT: -8372255(nan), 0(0.000000e+00) +; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, literal.x, +; CM-NEXT: MULLO_INT T0.Y, T0.W, literal.x, +; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, literal.x, +; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, literal.x, +; CM-NEXT: -8372255(nan), 0(0.000000e+00) +; CM-NEXT: BIT_ALIGN_INT * T0.X, T0.X, PV.Y, 1, +; CM-NEXT: MOV * T1.X, literal.x, +; CM-NEXT: 0(0.000000e+00), 0(0.000000e+00) + %i = lshr i32 %arg, 9 + %i1 = zext i32 %i to i64 + %i2 = mul i64 %i1, 4286595041 + %i3 = lshr i64 %i2, 1 + %i4 = trunc i64 %i3 to i32 + store i32 %i4, ptr addrspace(1) null, align 4 + ret void +} + attributes #0 = { nounwind } From f0010d131b79a1b401777aa32e96defc4a935c9d Mon Sep 17 00:00:00 2001 From: R-Goc <131907007+R-Goc@users.noreply.github.com> Date: Wed, 4 Sep 2024 20:10:36 +0200 Subject: [PATCH 18/29] [Windows SEH] Fix crash on empty seh block (#107031) Fixes https://github.com/llvm/llvm-project/issues/105813 and https://github.com/llvm/llvm-project/issues/106915. Adds a check for the end of the iterator, which can be a sentinel. The issue was introduced in https://github.com/llvm/llvm-project/commit/0efe111365ae176671e01252d24028047d807a84 from what I can see, so along with the introduction of /EHa support. 
(cherry picked from commit 2e0ded3371f8d42f376bdfd4d70687537e36818e) --- .../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 4 ++++ .../CodeGen/WinEH/wineh-empty-seh-scope.ll | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 llvm/test/CodeGen/WinEH/wineh-empty-seh-scope.ll diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index df3d207d85d3..b961d3bb1fec 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -1453,6 +1453,10 @@ void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) { if (BB->getFirstMayFaultInst()) { // Report IP range only for blocks with Faulty inst auto MBBb = MBB.getFirstNonPHI(); + + if (MBBb == MBB.end()) + continue; + MachineInstr *MIb = &*MBBb; if (MIb->isTerminator()) continue; diff --git a/llvm/test/CodeGen/WinEH/wineh-empty-seh-scope.ll b/llvm/test/CodeGen/WinEH/wineh-empty-seh-scope.ll new file mode 100644 index 000000000000..5f382f10f180 --- /dev/null +++ b/llvm/test/CodeGen/WinEH/wineh-empty-seh-scope.ll @@ -0,0 +1,18 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=x86_64-pc-windows-msvc19.41.34120 < %s | FileCheck %s + +define void @foo() personality ptr @__CxxFrameHandler3 { +; CHECK-LABEL: foo: +; CHECK: # %bb.0: +; CHECK-NEXT: nop # avoids zero-length function + call void @llvm.seh.scope.begin() + unreachable +} + +declare i32 @__CxxFrameHandler3(...) 
+ +declare void @llvm.seh.scope.begin() + +!llvm.module.flags = !{!0} + +!0 = !{i32 2, !"eh-asynch", i32 1} From 78654faa0c6d9dc2f72b81953b9cffbb7675755b Mon Sep 17 00:00:00 2001 From: Yingwei Zheng Date: Tue, 10 Sep 2024 09:19:39 +0800 Subject: [PATCH 19/29] [LoongArch][ISel] Check the number of sign bits in `PatGprGpr_32` (#107432) After https://github.com/llvm/llvm-project/pull/92205, LoongArch ISel selects `div.w` for `trunc i64 (sdiv i64 3202030857, (sext i32 X to i64)) to i32`. It is incorrect since `3202030857` is not a signed 32-bit constant. It will produce wrong result when `X == 2`: https://alive2.llvm.org/ce/z/pzfGZZ This patch adds additional `sexti32` checks to operands of `PatGprGpr_32`. Alive2 proof: https://alive2.llvm.org/ce/z/AkH5Mp Fix #107414. (cherry picked from commit a111f9119a5ec77c19a514ec09454218f739454f) --- .../Target/LoongArch/LoongArchInstrInfo.td | 5 +- .../ir-instruction/sdiv-udiv-srem-urem.ll | 67 ++++++++++++++++++- 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td index ef647a427787..339d50bd8192 100644 --- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td @@ -1065,10 +1065,13 @@ def RDTIME_D : RDTIME_2R<0x00006800>; /// Generic pattern classes +def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{ + return cast(N->getOperand(1))->getVT().bitsLE(MVT::i32); +}]>; class PatGprGpr : Pat<(OpNode GPR:$rj, GPR:$rk), (Inst GPR:$rj, GPR:$rk)>; class PatGprGpr_32 - : Pat<(sext_inreg (OpNode GPR:$rj, GPR:$rk), i32), (Inst GPR:$rj, GPR:$rk)>; + : Pat<(sext_inreg (OpNode (assertsexti32 GPR:$rj), (assertsexti32 GPR:$rk)), i32), (Inst GPR:$rj, GPR:$rk)>; class PatGpr : Pat<(OpNode GPR:$rj), (Inst GPR:$rj)>; diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll index 
ab3eec240db3..c22acdb49690 100644 --- a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll +++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll @@ -191,7 +191,8 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) { ; LA64: # %bb.0: # %entry ; LA64-NEXT: addi.w $a1, $a1, 0 ; LA64-NEXT: addi.w $a0, $a0, 0 -; LA64-NEXT: div.w $a0, $a0, $a1 +; LA64-NEXT: div.d $a0, $a0, $a1 +; LA64-NEXT: addi.w $a0, $a0, 0 ; LA64-NEXT: ret ; ; LA32-TRAP-LABEL: sdiv_si32_ui32_ui32: @@ -207,11 +208,12 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) { ; LA64-TRAP: # %bb.0: # %entry ; LA64-TRAP-NEXT: addi.w $a1, $a1, 0 ; LA64-TRAP-NEXT: addi.w $a0, $a0, 0 -; LA64-TRAP-NEXT: div.w $a0, $a0, $a1 +; LA64-TRAP-NEXT: div.d $a0, $a0, $a1 ; LA64-TRAP-NEXT: bnez $a1, .LBB5_2 ; LA64-TRAP-NEXT: # %bb.1: # %entry ; LA64-TRAP-NEXT: break 7 ; LA64-TRAP-NEXT: .LBB5_2: # %entry +; LA64-TRAP-NEXT: addi.w $a0, $a0, 0 ; LA64-TRAP-NEXT: ret entry: %r = sdiv i32 %a, %b @@ -1151,3 +1153,64 @@ entry: %r = urem i64 %a, %b ret i64 %r } + +define signext i32 @pr107414(i32 signext %x) { +; LA32-LABEL: pr107414: +; LA32: # %bb.0: # %entry +; LA32-NEXT: addi.w $sp, $sp, -16 +; LA32-NEXT: .cfi_def_cfa_offset 16 +; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill +; LA32-NEXT: .cfi_offset 1, -4 +; LA32-NEXT: move $a2, $a0 +; LA32-NEXT: srai.w $a3, $a0, 31 +; LA32-NEXT: lu12i.w $a0, -266831 +; LA32-NEXT: ori $a0, $a0, 3337 +; LA32-NEXT: move $a1, $zero +; LA32-NEXT: bl %plt(__divdi3) +; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload +; LA32-NEXT: addi.w $sp, $sp, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: pr107414: +; LA64: # %bb.0: # %entry +; LA64-NEXT: lu12i.w $a1, -266831 +; LA64-NEXT: ori $a1, $a1, 3337 +; LA64-NEXT: lu32i.d $a1, 0 +; LA64-NEXT: div.d $a0, $a1, $a0 +; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: ret +; +; LA32-TRAP-LABEL: pr107414: +; LA32-TRAP: # %bb.0: # %entry +; LA32-TRAP-NEXT: addi.w $sp, $sp, -16 +; LA32-TRAP-NEXT: .cfi_def_cfa_offset 16 
+; LA32-TRAP-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill +; LA32-TRAP-NEXT: .cfi_offset 1, -4 +; LA32-TRAP-NEXT: move $a2, $a0 +; LA32-TRAP-NEXT: srai.w $a3, $a0, 31 +; LA32-TRAP-NEXT: lu12i.w $a0, -266831 +; LA32-TRAP-NEXT: ori $a0, $a0, 3337 +; LA32-TRAP-NEXT: move $a1, $zero +; LA32-TRAP-NEXT: bl %plt(__divdi3) +; LA32-TRAP-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload +; LA32-TRAP-NEXT: addi.w $sp, $sp, 16 +; LA32-TRAP-NEXT: ret +; +; LA64-TRAP-LABEL: pr107414: +; LA64-TRAP: # %bb.0: # %entry +; LA64-TRAP-NEXT: lu12i.w $a1, -266831 +; LA64-TRAP-NEXT: ori $a1, $a1, 3337 +; LA64-TRAP-NEXT: lu32i.d $a1, 0 +; LA64-TRAP-NEXT: div.d $a1, $a1, $a0 +; LA64-TRAP-NEXT: bnez $a0, .LBB32_2 +; LA64-TRAP-NEXT: # %bb.1: # %entry +; LA64-TRAP-NEXT: break 7 +; LA64-TRAP-NEXT: .LBB32_2: # %entry +; LA64-TRAP-NEXT: addi.w $a0, $a1, 0 +; LA64-TRAP-NEXT: ret +entry: + %conv = sext i32 %x to i64 + %div = sdiv i64 3202030857, %conv + %conv1 = trunc i64 %div to i32 + ret i32 %conv1 +} From d752f29fb333d47724484e08b32d6499cc1e460e Mon Sep 17 00:00:00 2001 From: hev Date: Tue, 10 Sep 2024 16:52:21 +0800 Subject: [PATCH 20/29] [LoongArch] Eliminate the redundant sign extension of division (#107971) If all incoming values of `div.d` are sign-extended and all users only use the lower 32 bits, then convert them to W versions. 
Fixes: #107946 (cherry picked from commit 0f47e3aebdd2a4a938468a272ea4224552dbf176) --- llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp index abac69054f3b..ab90409fdf47 100644 --- a/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp @@ -637,6 +637,19 @@ static bool isSignExtendedW(Register SrcReg, const LoongArchSubtarget &ST, break; } return false; + // If all incoming values are sign-extended and all users only use + // the lower 32 bits, then convert them to W versions. + case LoongArch::DIV_D: { + if (!AddRegToWorkList(MI->getOperand(1).getReg())) + return false; + if (!AddRegToWorkList(MI->getOperand(2).getReg())) + return false; + if (hasAllWUsers(*MI, ST, MRI)) { + FixableDef.insert(MI); + break; + } + return false; + } } } @@ -651,6 +664,8 @@ static unsigned getWOp(unsigned Opcode) { return LoongArch::ADDI_W; case LoongArch::ADD_D: return LoongArch::ADD_W; + case LoongArch::DIV_D: + return LoongArch::DIV_W; case LoongArch::LD_D: case LoongArch::LD_WU: return LoongArch::LD_W; From 6278084bc69a427cf7a610076817c420e3dc8594 Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Wed, 11 Sep 2024 08:47:24 +0200 Subject: [PATCH 21/29] [Clang] Fix crash due to invalid source location in __is_trivially_equality_comparable (#107815) Fixes #107777 (cherry picked from commit 6dbdb8430b492959c399a7809247424c6962902f) --- clang/lib/Sema/SemaExprCXX.cpp | 3 ++- clang/test/SemaCXX/type-traits.cpp | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index 14d1f395af90..de50786f4d6c 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -5140,7 +5140,8 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S, // const 
ClassT& obj; OpaqueValueExpr Operand( - {}, Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(), + KeyLoc, + Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(), ExprValueKind::VK_LValue); UnresolvedSet<16> Functions; // obj == obj; diff --git a/clang/test/SemaCXX/type-traits.cpp b/clang/test/SemaCXX/type-traits.cpp index 7c5be2ab374a..608852da7033 100644 --- a/clang/test/SemaCXX/type-traits.cpp +++ b/clang/test/SemaCXX/type-traits.cpp @@ -3958,6 +3958,24 @@ class Template {}; // Make sure we don't crash when instantiating a type static_assert(!__is_trivially_equality_comparable(Template>)); + +struct S operator==(S, S); + +template struct basic_string_view {}; + +struct basic_string { + operator basic_string_view() const; +}; + +template +const bool is_trivially_equality_comparable = __is_trivially_equality_comparable(T); + +template > +void find(); + +void func() { find(); } + + namespace hidden_friend { struct TriviallyEqualityComparable { From a847b66a750291f8b63c03b9f355c6f4d09cdfe3 Mon Sep 17 00:00:00 2001 From: Jonathon Penix Date: Wed, 11 Sep 2024 09:53:11 -0700 Subject: [PATCH 22/29] [RISCV] Don't outline pcrel_lo when the function has a section prefix (#107943) GNU ld will error when encountering a pcrel_lo whose corresponding pcrel_hi is in a different section. [1] introduced a check to help prevent this issue by preventing outlining in a few circumstances. However, we can also hit this same issue when outlining from functions with prefixes ("hot"/"unlikely"/"unknown" from profile information, for example) as the outlined function might not have the same prefix, possibly resulting in a "paired" pcrel_lo and pcrel_hi ending up in different sections. To prevent this issue, take a similar approach as [1] and additionally prevent outlining when we see a pcrel_lo and the function has a prefix. 
[1] https://github.com/llvm/llvm-project/commit/96c85f80f0d615ffde0f85d8270e0a8c9f4e5430 Fixes #107520 (cherry picked from commit 866b93e6b33fac9a4bc62bbc32199bd98f434784) --- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 2 +- .../RISCV/machineoutliner-pcrel-lo.mir | 104 +++++++++++++++++- 2 files changed, 99 insertions(+), 7 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index ba3b4bd701d6..6c0cbeadebf4 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -2902,7 +2902,7 @@ RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI, // if any possible. if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO && (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() || - F.hasSection())) + F.hasSection() || F.getSectionPrefix())) return outliner::InstrType::Illegal; } diff --git a/llvm/test/CodeGen/RISCV/machineoutliner-pcrel-lo.mir b/llvm/test/CodeGen/RISCV/machineoutliner-pcrel-lo.mir index 8a83543b0280..fd3630bcfad2 100644 --- a/llvm/test/CodeGen/RISCV/machineoutliner-pcrel-lo.mir +++ b/llvm/test/CodeGen/RISCV/machineoutliner-pcrel-lo.mir @@ -18,6 +18,9 @@ define i32 @foo2(i32 %a, i32 %b) comdat { ret i32 0 } define i32 @foo3(i32 %a, i32 %b) section ".abc" { ret i32 0 } + + define i32 @foo4(i32 %a, i32 %b) !section_prefix !0 { ret i32 0 } + !0 = !{!"function_section_prefix", !"myprefix"} ... 
--- name: foo @@ -27,23 +30,24 @@ body: | ; CHECK: bb.0: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11, implicit $x13 ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: PseudoRET + ; ; CHECK-FS-LABEL: name: foo ; CHECK-FS: bb.0: ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 @@ -109,26 +113,27 @@ body: | ; CHECK: bb.0: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def 
$x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: ; CHECK-NEXT: liveins: $x10, $x11, $x13 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_1, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: PseudoRET + ; ; CHECK-FS-LABEL: name: foo2 ; CHECK-FS: bb.0: ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 @@ -223,6 +228,7 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: PseudoRET + ; ; CHECK-FS-LABEL: name: foo3 ; CHECK-FS: bb.0: ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 
@@ -289,3 +295,89 @@ body: | bb.3: PseudoRET ... +--- +name: foo4 +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: foo4 + ; CHECK: bb.0: + ; CHECK-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-NEXT: PseudoBR %bb.3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-NEXT: PseudoBR %bb.3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-NEXT: PseudoBR %bb.3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: PseudoRET + ; + ; CHECK-FS-LABEL: name: foo4 + ; CHECK-FS: bb.0: + ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-FS-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-FS-NEXT: PseudoBR %bb.3 + ; 
CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: bb.1: + ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-FS-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-FS-NEXT: PseudoBR %bb.3 + ; CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: bb.2: + ; CHECK-FS-NEXT: liveins: $x10, $x11, $x13 + ; CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: $x5 = PseudoCALLReg target-flags(riscv-call) @OUTLINED_FUNCTION_0, implicit-def $x5, implicit-def $x10, implicit-def $x11, implicit-def $x12, implicit $x10, implicit $x11 + ; CHECK-FS-NEXT: $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + ; CHECK-FS-NEXT: PseudoBR %bb.3 + ; CHECK-FS-NEXT: {{ $}} + ; CHECK-FS-NEXT: bb.3: + ; CHECK-FS-NEXT: PseudoRET + bb.0: + liveins: $x10, $x11, $x13 + + $x11 = ORI $x11, 1023 + $x12 = ADDI $x10, 17 + $x11 = AND $x12, $x11 + $x10 = SUB $x10, $x11 + $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + PseudoBR %bb.3 + + bb.1: + liveins: $x10, $x11, $x13 + + $x11 = ORI $x11, 1023 + $x12 = ADDI $x10, 17 + $x11 = AND $x12, $x11 + $x10 = SUB $x10, $x11 + $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + PseudoBR %bb.3 + + bb.2: + liveins: $x10, $x11, $x13 + + $x11 = ORI $x11, 1023 + $x12 = ADDI $x10, 17 + $x11 = AND $x12, $x11 + $x10 = SUB $x10, $x11 + $x11 = LW killed renamable $x13, target-flags(riscv-pcrel-lo) :: (dereferenceable load (s32) from @bar) + PseudoBR %bb.3 + + bb.3: + PseudoRET +... 
From 82f3a4a32d2500ab1e6c51e0d749ffbac9afb1fa Mon Sep 17 00:00:00 2001 From: Konstantin Varlamov Date: Fri, 13 Sep 2024 01:26:57 -0700 Subject: [PATCH 23/29] Guard an include of `` in `` with availability macro (#108429) This fixes a regression introduced in https://github.com/llvm/llvm-project/pull/96035. (cherry picked from commit 127c34948bd54e92ef2ee544e8bc42acecf321ad) --- libcxx/include/chrono | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcxx/include/chrono b/libcxx/include/chrono index 990c415ec2e9..7bec5e5a26ef 100644 --- a/libcxx/include/chrono +++ b/libcxx/include/chrono @@ -1015,8 +1015,8 @@ constexpr chrono::year operator ""y(unsigned lo # include # if !defined(_LIBCPP_HAS_NO_LOCALIZATION) # include +# include # endif -# include #endif #endif // _LIBCPP_CHRONO From 82e85b62da3f62759ab94aecd0ebac61f3856719 Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Fri, 13 Sep 2024 17:10:03 -0500 Subject: [PATCH 24/29] [lld] select a default eflags for hexagon (#108431) Empty archives are apparently routine in linux kernel builds, so instead of asserting, we should handle this case with a sane default value. (cherry picked from commit d1ba432533aafc52fc59158350af937a8b6b9538) --- lld/ELF/Arch/Hexagon.cpp | 8 +++----- lld/test/ELF/hexagon-eflag.s | 5 +++++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lld/ELF/Arch/Hexagon.cpp b/lld/ELF/Arch/Hexagon.cpp index 54821c299bde..abde3cd96491 100644 --- a/lld/ELF/Arch/Hexagon.cpp +++ b/lld/ELF/Arch/Hexagon.cpp @@ -60,17 +60,15 @@ Hexagon::Hexagon() { } uint32_t Hexagon::calcEFlags() const { - assert(!ctx.objectFiles.empty()); - // The architecture revision must always be equal to or greater than // greatest revision in the list of inputs. 
- uint32_t ret = 0; + std::optional ret; for (InputFile *f : ctx.objectFiles) { uint32_t eflags = cast>(f)->getObj().getHeader().e_flags; - if (eflags > ret) + if (!ret || eflags > *ret) ret = eflags; } - return ret; + return ret.value_or(/* Default Arch Rev: */ 0x60); } static uint32_t applyMask(uint32_t mask, uint32_t data) { diff --git a/lld/test/ELF/hexagon-eflag.s b/lld/test/ELF/hexagon-eflag.s index 01cb5e5b0f29..dbe8604f69fd 100644 --- a/lld/test/ELF/hexagon-eflag.s +++ b/lld/test/ELF/hexagon-eflag.s @@ -5,3 +5,8 @@ # RUN: llvm-readelf -h %t3 | FileCheck %s # Verify that the largest arch in the input list is selected. # CHECK: Flags: 0x62 + +# RUN: llvm-ar rcsD %t4 +# RUN: ld.lld -m hexagonelf %t4 -o %t5 +# RUN: llvm-readelf -h %t5 | FileCheck --check-prefix=CHECK-EMPTYARCHIVE %s +# CHECK-EMPTYARCHIVE: Flags: 0x60 From 149a150b50c112e26fc5acbdd58250c44ccd777f Mon Sep 17 00:00:00 2001 From: Ganesh Gopalasubramanian Date: Mon, 16 Sep 2024 11:16:14 +0000 Subject: [PATCH 25/29] [X86] AMD Zen 5 Initial enablement --- clang/lib/Basic/Targets/X86.cpp | 4 + clang/test/CodeGen/target-builtin-noerror.c | 1 + clang/test/Driver/x86-march.c | 4 + clang/test/Frontend/x86-target-cpu.c | 1 + clang/test/Misc/target-invalid-cpu-note.c | 8 +- .../Preprocessor/predefined-arch-macros.c | 142 ++++++++++++++++++ compiler-rt/lib/builtins/cpu_model/x86.c | 20 +++ .../llvm/TargetParser/X86TargetParser.def | 3 + .../llvm/TargetParser/X86TargetParser.h | 1 + llvm/lib/Target/X86/X86.td | 15 ++ llvm/lib/Target/X86/X86PfmCounters.td | 1 + llvm/lib/TargetParser/Host.cpp | 19 +++ llvm/lib/TargetParser/X86TargetParser.cpp | 5 + .../CodeGen/X86/bypass-slow-division-64.ll | 1 + llvm/test/CodeGen/X86/cmp16.ll | 1 + llvm/test/CodeGen/X86/cpus-amd.ll | 1 + llvm/test/CodeGen/X86/rdpru.ll | 1 + llvm/test/CodeGen/X86/shuffle-as-shifts.ll | 1 + llvm/test/CodeGen/X86/slow-unaligned-mem.ll | 1 + llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll | 1 + .../X86/tuning-shuffle-permilpd-avx512.ll | 1 + 
.../X86/tuning-shuffle-permilps-avx512.ll | 1 + .../X86/tuning-shuffle-unpckpd-avx512.ll | 1 + .../X86/tuning-shuffle-unpckps-avx512.ll | 1 + .../X86/vector-shuffle-fast-per-lane.ll | 1 + llvm/test/CodeGen/X86/vpdpwssd.ll | 1 + .../CodeGen/X86/x86-64-double-shifts-var.ll | 1 + llvm/test/MC/X86/x86_long_nop.s | 2 + .../Transforms/LoopUnroll/X86/call-remark.ll | 1 + .../Transforms/SLPVectorizer/X86/pr63668.ll | 1 + 30 files changed, 238 insertions(+), 4 deletions(-) diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp index 18e6dbf03e00..072c97e6c8c6 100644 --- a/clang/lib/Basic/Targets/X86.cpp +++ b/clang/lib/Basic/Targets/X86.cpp @@ -723,6 +723,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts, case CK_ZNVER4: defineCPUMacros(Builder, "znver4"); break; + case CK_ZNVER5: + defineCPUMacros(Builder, "znver5"); + break; case CK_Geode: defineCPUMacros(Builder, "geode"); break; @@ -1613,6 +1616,7 @@ std::optional X86TargetInfo::getCPUCacheLineSize() const { case CK_ZNVER2: case CK_ZNVER3: case CK_ZNVER4: + case CK_ZNVER5: // Deprecated case CK_x86_64: case CK_x86_64_v2: diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c index 2e16fd8b9fe4..d681dcd3a13e 100644 --- a/clang/test/CodeGen/target-builtin-noerror.c +++ b/clang/test/CodeGen/target-builtin-noerror.c @@ -205,4 +205,5 @@ void verifycpustrings(void) { (void)__builtin_cpu_is("znver2"); (void)__builtin_cpu_is("znver3"); (void)__builtin_cpu_is("znver4"); + (void)__builtin_cpu_is("znver5"); } diff --git a/clang/test/Driver/x86-march.c b/clang/test/Driver/x86-march.c index cc993b53937c..3bc2a82ae778 100644 --- a/clang/test/Driver/x86-march.c +++ b/clang/test/Driver/x86-march.c @@ -242,6 +242,10 @@ // RUN: %clang -target x86_64-unknown-unknown -c -### %s -march=znver4 2>&1 \ // RUN: | FileCheck %s -check-prefix=znver4 // znver4: "-target-cpu" "znver4" +// +// RUN: %clang -target x86_64-unknown-unknown -c -### %s -march=znver5 
2>&1 \ +// RUN: | FileCheck %s -check-prefix=znver5 +// znver5: "-target-cpu" "znver5" // RUN: %clang -target x86_64 -c -### %s -march=x86-64 2>&1 | FileCheck %s --check-prefix=x86-64 // x86-64: "-target-cpu" "x86-64" diff --git a/clang/test/Frontend/x86-target-cpu.c b/clang/test/Frontend/x86-target-cpu.c index 6c8502ac2c21..f2885a040c37 100644 --- a/clang/test/Frontend/x86-target-cpu.c +++ b/clang/test/Frontend/x86-target-cpu.c @@ -38,5 +38,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu znver2 -verify %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu znver3 -verify %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu znver4 -verify %s +// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu znver5 -verify %s // // expected-no-diagnostics diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c index 4d6759dd8153..6fd71bb82381 100644 --- a/clang/test/Misc/target-invalid-cpu-note.c +++ b/clang/test/Misc/target-invalid-cpu-note.c @@ -13,19 +13,19 @@ // RUN: not %clang_cc1 -triple i386--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix X86 // X86: error: unknown target CPU 'not-a-cpu' -// X86-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, 
athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, x86-64, x86-64-v2, x86-64-v3, x86-64-v4, geode{{$}} +// X86-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, znver5, x86-64, x86-64-v2, x86-64-v3, x86-64-v4, geode{{$}} // RUN: not %clang_cc1 -triple x86_64--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix X86_64 // X86_64: error: unknown target CPU 'not-a-cpu' -// X86_64-NEXT: note: valid target CPU values are: nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, 
sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, x86-64, x86-64-v2, x86-64-v3, x86-64-v4{{$}} +// X86_64-NEXT: note: valid target CPU values are: nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, znver5, x86-64, x86-64-v2, x86-64-v3, x86-64-v4{{$}} // RUN: not %clang_cc1 -triple i386--- -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE_X86 // TUNE_X86: error: unknown target CPU 'not-a-cpu' -// TUNE_X86-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, 
graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, x86-64, geode{{$}} +// TUNE_X86-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, znver5, x86-64, geode{{$}} // RUN: not %clang_cc1 -triple x86_64--- -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE_X86_64 // TUNE_X86_64: error: unknown target CPU 'not-a-cpu' -// TUNE_X86_64-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, 
cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, x86-64, geode{{$}} +// TUNE_X86_64-NEXT: note: valid target CPU values are: i386, i486, winchip-c6, winchip2, c3, i586, pentium, pentium-mmx, pentiumpro, i686, pentium2, pentium3, pentium3m, pentium-m, c3-2, yonah, pentium4, pentium4m, prescott, nocona, core2, penryn, bonnell, atom, silvermont, slm, goldmont, goldmont-plus, tremont, nehalem, corei7, westmere, sandybridge, corei7-avx, ivybridge, core-avx-i, haswell, core-avx2, broadwell, skylake, skylake-avx512, skx, cascadelake, cooperlake, cannonlake, icelake-client, rocketlake, icelake-server, tigerlake, sapphirerapids, alderlake, raptorlake, meteorlake, arrowlake, arrowlake-s, lunarlake, gracemont, pantherlake, sierraforest, grandridge, graniterapids, graniterapids-d, emeraldrapids, clearwaterforest, knl, knm, lakemont, k6, k6-2, k6-3, athlon, athlon-tbird, athlon-xp, athlon-mp, athlon-4, k8, athlon64, athlon-fx, opteron, k8-sse3, athlon64-sse3, opteron-sse3, amdfam10, barcelona, btver1, btver2, bdver1, bdver2, bdver3, bdver4, znver1, znver2, znver3, znver4, znver5, x86-64, geode{{$}} // RUN: not %clang_cc1 -triple nvptx--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix NVPTX // NVPTX: error: unknown target CPU 'not-a-cpu' diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c index 6f470d85ca56..a90ec1f56b1a 100644 --- a/clang/test/Preprocessor/predefined-arch-macros.c +++ 
b/clang/test/Preprocessor/predefined-arch-macros.c @@ -3923,6 +3923,148 @@ // CHECK_ZNVER4_M64: #define __znver4 1 // CHECK_ZNVER4_M64: #define __znver4__ 1 +// RUN: %clang -march=znver5 -m32 -E -dM %s -o - 2>&1 \ +// RUN: -target i386-unknown-linux \ +// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_ZNVER5_M32 +// CHECK_ZNVER5_M32-NOT: #define __3dNOW_A__ 1 +// CHECK_ZNVER5_M32-NOT: #define __3dNOW__ 1 +// CHECK_ZNVER5_M32: #define __ADX__ 1 +// CHECK_ZNVER5_M32: #define __AES__ 1 +// CHECK_ZNVER5_M32: #define __AVX2__ 1 +// CHECK_ZNVER5_M32: #define __AVX512BF16__ 1 +// CHECK_ZNVER5_M32: #define __AVX512BITALG__ 1 +// CHECK_ZNVER5_M32: #define __AVX512BW__ 1 +// CHECK_ZNVER5_M32: #define __AVX512CD__ 1 +// CHECK_ZNVER5_M32: #define __AVX512DQ__ 1 +// CHECK_ZNVER5_M32: #define __AVX512F__ 1 +// CHECK_ZNVER5_M32: #define __AVX512IFMA__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VBMI2__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VBMI__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VL__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VNNI__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VP2INTERSECT__ 1 +// CHECK_ZNVER5_M32: #define __AVX512VPOPCNTDQ__ 1 +// CHECK_ZNVER5_M32: #define __AVXVNNI__ 1 +// CHECK_ZNVER5_M32: #define __AVX__ 1 +// CHECK_ZNVER5_M32: #define __BMI2__ 1 +// CHECK_ZNVER5_M32: #define __BMI__ 1 +// CHECK_ZNVER5_M32: #define __CLFLUSHOPT__ 1 +// CHECK_ZNVER5_M32: #define __CLWB__ 1 +// CHECK_ZNVER5_M32: #define __CLZERO__ 1 +// CHECK_ZNVER5_M32: #define __F16C__ 1 +// CHECK_ZNVER5_M32-NOT: #define __FMA4__ 1 +// CHECK_ZNVER5_M32: #define __FMA__ 1 +// CHECK_ZNVER5_M32: #define __FSGSBASE__ 1 +// CHECK_ZNVER5_M32: #define __GFNI__ 1 +// CHECK_ZNVER5_M32: #define __LZCNT__ 1 +// CHECK_ZNVER5_M32: #define __MMX__ 1 +// CHECK_ZNVER5_M32: #define __MOVDIR64B__ 1 +// CHECK_ZNVER5_M32: #define __MOVDIRI__ 1 +// CHECK_ZNVER5_M32: #define __PCLMUL__ 1 +// CHECK_ZNVER5_M32: #define __PKU__ 1 +// CHECK_ZNVER5_M32: #define __POPCNT__ 1 +// CHECK_ZNVER5_M32: #define 
__PREFETCHI__ 1 +// CHECK_ZNVER5_M32: #define __PRFCHW__ 1 +// CHECK_ZNVER5_M32: #define __RDPID__ 1 +// CHECK_ZNVER5_M32: #define __RDPRU__ 1 +// CHECK_ZNVER5_M32: #define __RDRND__ 1 +// CHECK_ZNVER5_M32: #define __RDSEED__ 1 +// CHECK_ZNVER5_M32: #define __SHA__ 1 +// CHECK_ZNVER5_M32: #define __SSE2_MATH__ 1 +// CHECK_ZNVER5_M32: #define __SSE2__ 1 +// CHECK_ZNVER5_M32: #define __SSE3__ 1 +// CHECK_ZNVER5_M32: #define __SSE4A__ 1 +// CHECK_ZNVER5_M32: #define __SSE4_1__ 1 +// CHECK_ZNVER5_M32: #define __SSE4_2__ 1 +// CHECK_ZNVER5_M32: #define __SSE_MATH__ 1 +// CHECK_ZNVER5_M32: #define __SSE__ 1 +// CHECK_ZNVER5_M32: #define __SSSE3__ 1 +// CHECK_ZNVER5_M32-NOT: #define __TBM__ 1 +// CHECK_ZNVER5_M32: #define __WBNOINVD__ 1 +// CHECK_ZNVER5_M32-NOT: #define __XOP__ 1 +// CHECK_ZNVER5_M32: #define __XSAVEC__ 1 +// CHECK_ZNVER5_M32: #define __XSAVEOPT__ 1 +// CHECK_ZNVER5_M32: #define __XSAVES__ 1 +// CHECK_ZNVER5_M32: #define __XSAVE__ 1 +// CHECK_ZNVER5_M32: #define __i386 1 +// CHECK_ZNVER5_M32: #define __i386__ 1 +// CHECK_ZNVER5_M32: #define __tune_znver5__ 1 +// CHECK_ZNVER5_M32: #define __znver5 1 +// CHECK_ZNVER5_M32: #define __znver5__ 1 + +// RUN: %clang -march=znver5 -m64 -E -dM %s -o - 2>&1 \ +// RUN: -target i386-unknown-linux \ +// RUN: | FileCheck -match-full-lines %s -check-prefix=CHECK_ZNVER5_M64 +// CHECK_ZNVER5_M64-NOT: #define __3dNOW_A__ 1 +// CHECK_ZNVER5_M64-NOT: #define __3dNOW__ 1 +// CHECK_ZNVER5_M64: #define __ADX__ 1 +// CHECK_ZNVER5_M64: #define __AES__ 1 +// CHECK_ZNVER5_M64: #define __AVX2__ 1 +// CHECK_ZNVER5_M64: #define __AVX512BF16__ 1 +// CHECK_ZNVER5_M64: #define __AVX512BITALG__ 1 +// CHECK_ZNVER5_M64: #define __AVX512BW__ 1 +// CHECK_ZNVER5_M64: #define __AVX512CD__ 1 +// CHECK_ZNVER5_M64: #define __AVX512DQ__ 1 +// CHECK_ZNVER5_M64: #define __AVX512F__ 1 +// CHECK_ZNVER5_M64: #define __AVX512IFMA__ 1 +// CHECK_ZNVER5_M64: #define __AVX512VBMI2__ 1 +// CHECK_ZNVER5_M64: #define __AVX512VBMI__ 1 +// CHECK_ZNVER5_M64: 
#define __AVX512VL__ 1 +// CHECK_ZNVER5_M64: #define __AVX512VNNI__ 1 +// CHECK_ZNVER5_M64: #define __AVX512VP2INTERSECT__ 1 +// CHECK_ZNVER5_M64: #define __AVX512VPOPCNTDQ__ 1 +// CHECK_ZNVER5_M64: #define __AVXVNNI__ 1 +// CHECK_ZNVER5_M64: #define __AVX__ 1 +// CHECK_ZNVER5_M64: #define __BMI2__ 1 +// CHECK_ZNVER5_M64: #define __BMI__ 1 +// CHECK_ZNVER5_M64: #define __CLFLUSHOPT__ 1 +// CHECK_ZNVER5_M64: #define __CLWB__ 1 +// CHECK_ZNVER5_M64: #define __CLZERO__ 1 +// CHECK_ZNVER5_M64: #define __F16C__ 1 +// CHECK_ZNVER5_M64-NOT: #define __FMA4__ 1 +// CHECK_ZNVER5_M64: #define __FMA__ 1 +// CHECK_ZNVER5_M64: #define __FSGSBASE__ 1 +// CHECK_ZNVER5_M64: #define __GFNI__ 1 +// CHECK_ZNVER5_M64: #define __LZCNT__ 1 +// CHECK_ZNVER5_M64: #define __MMX__ 1 +// CHECK_ZNVER5_M64: #define __MOVDIR64B__ 1 +// CHECK_ZNVER5_M64: #define __MOVDIRI__ 1 +// CHECK_ZNVER5_M64: #define __PCLMUL__ 1 +// CHECK_ZNVER5_M64: #define __PKU__ 1 +// CHECK_ZNVER5_M64: #define __POPCNT__ 1 +// CHECK_ZNVER5_M64: #define __PREFETCHI__ 1 +// CHECK_ZNVER5_M64: #define __PRFCHW__ 1 +// CHECK_ZNVER5_M64: #define __RDPID__ 1 +// CHECK_ZNVER5_M64: #define __RDPRU__ 1 +// CHECK_ZNVER5_M64: #define __RDRND__ 1 +// CHECK_ZNVER5_M64: #define __RDSEED__ 1 +// CHECK_ZNVER5_M64: #define __SHA__ 1 +// CHECK_ZNVER5_M64: #define __SSE2_MATH__ 1 +// CHECK_ZNVER5_M64: #define __SSE2__ 1 +// CHECK_ZNVER5_M64: #define __SSE3__ 1 +// CHECK_ZNVER5_M64: #define __SSE4A__ 1 +// CHECK_ZNVER5_M64: #define __SSE4_1__ 1 +// CHECK_ZNVER5_M64: #define __SSE4_2__ 1 +// CHECK_ZNVER5_M64: #define __SSE_MATH__ 1 +// CHECK_ZNVER5_M64: #define __SSE__ 1 +// CHECK_ZNVER5_M64: #define __SSSE3__ 1 +// CHECK_ZNVER5_M64-NOT: #define __TBM__ 1 +// CHECK_ZNVER5_M64: #define __VAES__ 1 +// CHECK_ZNVER5_M64: #define __VPCLMULQDQ__ 1 +// CHECK_ZNVER5_M64: #define __WBNOINVD__ 1 +// CHECK_ZNVER5_M64-NOT: #define __XOP__ 1 +// CHECK_ZNVER5_M64: #define __XSAVEC__ 1 +// CHECK_ZNVER5_M64: #define __XSAVEOPT__ 1 +// CHECK_ZNVER5_M64: 
#define __XSAVES__ 1 +// CHECK_ZNVER5_M64: #define __XSAVE__ 1 +// CHECK_ZNVER5_M64: #define __amd64 1 +// CHECK_ZNVER5_M64: #define __amd64__ 1 +// CHECK_ZNVER5_M64: #define __tune_znver5__ 1 +// CHECK_ZNVER5_M64: #define __x86_64 1 +// CHECK_ZNVER5_M64: #define __x86_64__ 1 +// CHECK_ZNVER5_M64: #define __znver5 1 +// CHECK_ZNVER5_M64: #define __znver5__ 1 + // End X86/GCC/Linux tests ------------------ // Begin PPC/GCC/Linux tests ---------------- diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c index 867ed97e57bf..b1c4abd9d11d 100644 --- a/compiler-rt/lib/builtins/cpu_model/x86.c +++ b/compiler-rt/lib/builtins/cpu_model/x86.c @@ -59,6 +59,7 @@ enum ProcessorTypes { INTEL_SIERRAFOREST, INTEL_GRANDRIDGE, INTEL_CLEARWATERFOREST, + AMDFAM1AH, CPU_TYPE_MAX }; @@ -97,6 +98,7 @@ enum ProcessorSubtypes { INTEL_COREI7_ARROWLAKE, INTEL_COREI7_ARROWLAKE_S, INTEL_COREI7_PANTHERLAKE, + AMDFAM1AH_ZNVER5, CPU_SUBTYPE_MAX }; @@ -803,6 +805,24 @@ static const char *getAMDProcessorTypeAndSubtype(unsigned Family, break; // "znver4" } break; // family 19h + case 26: + CPU = "znver5"; + *Type = AMDFAM1AH; + if (Model <= 0x77) { + // Models 00h-0Fh (Breithorn). + // Models 10h-1Fh (Breithorn-Dense). + // Models 20h-2Fh (Strix 1). + // Models 30h-37h (Strix 2). + // Models 38h-3Fh (Strix 3). + // Models 40h-4Fh (Granite Ridge). + // Models 50h-5Fh (Weisshorn). + // Models 60h-6Fh (Krackan1). + // Models 70h-77h (Sarlak). + CPU = "znver5"; + *Subtype = AMDFAM1AH_ZNVER5; + break; // "znver5" + } + break; default: break; // Unknown AMD CPU. 
} diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.def b/llvm/include/llvm/TargetParser/X86TargetParser.def index 92798cbe4b4c..008cf5381c12 100644 --- a/llvm/include/llvm/TargetParser/X86TargetParser.def +++ b/llvm/include/llvm/TargetParser/X86TargetParser.def @@ -49,11 +49,13 @@ X86_CPU_TYPE(ZHAOXIN_FAM7H, "zhaoxin_fam7h") X86_CPU_TYPE(INTEL_SIERRAFOREST, "sierraforest") X86_CPU_TYPE(INTEL_GRANDRIDGE, "grandridge") X86_CPU_TYPE(INTEL_CLEARWATERFOREST, "clearwaterforest") +X86_CPU_TYPE(AMDFAM1AH, "amdfam1ah") // Alternate names supported by __builtin_cpu_is and target multiversioning. X86_CPU_TYPE_ALIAS(INTEL_BONNELL, "atom") X86_CPU_TYPE_ALIAS(AMDFAM10H, "amdfam10") X86_CPU_TYPE_ALIAS(AMDFAM15H, "amdfam15") +X86_CPU_TYPE_ALIAS(AMDFAM1AH, "amdfam1a") X86_CPU_TYPE_ALIAS(INTEL_SILVERMONT, "slm") #undef X86_CPU_TYPE_ALIAS @@ -104,6 +106,7 @@ X86_CPU_SUBTYPE(INTEL_COREI7_GRANITERAPIDS_D,"graniterapids-d") X86_CPU_SUBTYPE(INTEL_COREI7_ARROWLAKE, "arrowlake") X86_CPU_SUBTYPE(INTEL_COREI7_ARROWLAKE_S, "arrowlake-s") X86_CPU_SUBTYPE(INTEL_COREI7_PANTHERLAKE, "pantherlake") +X86_CPU_SUBTYPE(AMDFAM1AH_ZNVER5, "znver5") // Alternate names supported by __builtin_cpu_is and target multiversioning. X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_ALDERLAKE, "raptorlake") diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.h b/llvm/include/llvm/TargetParser/X86TargetParser.h index 2083e585af4a..5468aaa81edb 100644 --- a/llvm/include/llvm/TargetParser/X86TargetParser.h +++ b/llvm/include/llvm/TargetParser/X86TargetParser.h @@ -147,6 +147,7 @@ enum CPUKind { CK_x86_64_v3, CK_x86_64_v4, CK_Geode, + CK_ZNVER5, }; /// Parse \p CPU string into a CPUKind. 
Will only accept 64-bit capable CPUs if diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td index 9dafd5e628ca..e82e624f7099 100644 --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -1543,6 +1543,19 @@ def ProcessorFeatures { FeatureVPOPCNTDQ]; list ZN4Features = !listconcat(ZN3Features, ZN4AdditionalFeatures); + + + list ZN5Tuning = ZN4Tuning; + list ZN5AdditionalFeatures = [FeatureVNNI, + FeatureMOVDIRI, + FeatureMOVDIR64B, + FeatureVP2INTERSECT, + FeaturePREFETCHI, + FeatureAVXVNNI + ]; + list ZN5Features = + !listconcat(ZN4Features, ZN5AdditionalFeatures); + } //===----------------------------------------------------------------------===// @@ -1892,6 +1905,8 @@ def : ProcModel<"znver3", Znver3Model, ProcessorFeatures.ZN3Features, ProcessorFeatures.ZN3Tuning>; def : ProcModel<"znver4", Znver4Model, ProcessorFeatures.ZN4Features, ProcessorFeatures.ZN4Tuning>; +def : ProcModel<"znver5", Znver4Model, ProcessorFeatures.ZN5Features, + ProcessorFeatures.ZN5Tuning>; def : Proc<"geode", [FeatureX87, FeatureCX8, FeatureMMX, FeaturePRFCHW], [TuningSlowUAMem16, TuningInsertVZEROUPPER]>; diff --git a/llvm/lib/Target/X86/X86PfmCounters.td b/llvm/lib/Target/X86/X86PfmCounters.td index 2b1dac411c99..c30e989cdc2a 100644 --- a/llvm/lib/Target/X86/X86PfmCounters.td +++ b/llvm/lib/Target/X86/X86PfmCounters.td @@ -350,3 +350,4 @@ def ZnVer4PfmCounters : ProcPfmCounters { let ValidationCounters = DefaultAMDPfmValidationCounters; } def : PfmCountersBinding<"znver4", ZnVer4PfmCounters>; +def : PfmCountersBinding<"znver5", ZnVer4PfmCounters>; diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp index 7e637cba4cfb..865b6a44adbb 100644 --- a/llvm/lib/TargetParser/Host.cpp +++ b/llvm/lib/TargetParser/Host.cpp @@ -1213,6 +1213,25 @@ static const char *getAMDProcessorTypeAndSubtype(unsigned Family, break; // "znver4" } break; // family 19h + case 26: + CPU = "znver5"; + *Type = X86::AMDFAM1AH; + if (Model <= 0x77) { + // Models 
00h-0Fh (Breithorn). + // Models 10h-1Fh (Breithorn-Dense). + // Models 20h-2Fh (Strix 1). + // Models 30h-37h (Strix 2). + // Models 38h-3Fh (Strix 3). + // Models 40h-4Fh (Granite Ridge). + // Models 50h-5Fh (Weisshorn). + // Models 60h-6Fh (Krackan1). + // Models 70h-77h (Sarlak). + CPU = "znver5"; + *Subtype = X86::AMDFAM1AH_ZNVER5; + break; // "znver5" + } + break; + default: break; // Unknown AMD CPU. } diff --git a/llvm/lib/TargetParser/X86TargetParser.cpp b/llvm/lib/TargetParser/X86TargetParser.cpp index dcf9130052ac..a6f3b5ba5d33 100644 --- a/llvm/lib/TargetParser/X86TargetParser.cpp +++ b/llvm/lib/TargetParser/X86TargetParser.cpp @@ -238,6 +238,10 @@ static constexpr FeatureBitset FeaturesZNVER4 = FeatureAVX512BITALG | FeatureAVX512VPOPCNTDQ | FeatureAVX512BF16 | FeatureGFNI | FeatureSHSTK; +static constexpr FeatureBitset FeaturesZNVER5 = + FeaturesZNVER4 | FeatureAVXVNNI | FeatureMOVDIRI | FeatureMOVDIR64B | + FeatureAVX512VP2INTERSECT | FeaturePREFETCHI | FeatureAVXVNNI; + // D151696 tranplanted Mangling and OnlyForCPUDispatchSpecific from // X86TargetParser.def to here. They are assigned by following ways: // 1. Copy the mangling from the original CPU_SPEICIFC MACROs. If no, assign @@ -417,6 +421,7 @@ constexpr ProcInfo Processors[] = { { {"znver2"}, CK_ZNVER2, FEATURE_AVX2, FeaturesZNVER2, '\0', false }, { {"znver3"}, CK_ZNVER3, FEATURE_AVX2, FeaturesZNVER3, '\0', false }, { {"znver4"}, CK_ZNVER4, FEATURE_AVX512VBMI2, FeaturesZNVER4, '\0', false }, + { {"znver5"}, CK_ZNVER5, FEATURE_AVX512VP2INTERSECT, FeaturesZNVER5, '\0', false }, // Generic 64-bit processor. 
{ {"x86-64"}, CK_x86_64, FEATURE_SSE2 , FeaturesX86_64, '\0', false }, { {"x86-64-v2"}, CK_x86_64_v2, FEATURE_SSE4_2 , FeaturesX86_64_V2, '\0', false }, diff --git a/llvm/test/CodeGen/X86/bypass-slow-division-64.ll b/llvm/test/CodeGen/X86/bypass-slow-division-64.ll index 6e0cfdd26a78..b0ca0069a526 100644 --- a/llvm/test/CodeGen/X86/bypass-slow-division-64.ll +++ b/llvm/test/CodeGen/X86/bypass-slow-division-64.ll @@ -23,6 +23,7 @@ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver2 | FileCheck %s --check-prefixes=CHECK,SLOW-DIVQ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 | FileCheck %s --check-prefixes=CHECK,SLOW-DIVQ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,SLOW-DIVQ +; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,SLOW-DIVQ ; Additional tests for 64-bit divide bypass diff --git a/llvm/test/CodeGen/X86/cmp16.ll b/llvm/test/CodeGen/X86/cmp16.ll index fa9e75ff16a5..8c14a78d9e11 100644 --- a/llvm/test/CodeGen/X86/cmp16.ll +++ b/llvm/test/CodeGen/X86/cmp16.ll @@ -13,6 +13,7 @@ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver2 | FileCheck %s --check-prefixes=X64,X64-FAST ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 | FileCheck %s --check-prefixes=X64,X64-FAST ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s --check-prefixes=X64,X64-FAST +; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver5 | FileCheck %s --check-prefixes=X64,X64-FAST define i1 @cmp16_reg_eq_reg(i16 %a0, i16 %a1) { ; X86-GENERIC-LABEL: cmp16_reg_eq_reg: diff --git a/llvm/test/CodeGen/X86/cpus-amd.ll b/llvm/test/CodeGen/X86/cpus-amd.ll index 228a00428c45..33b2cf373147 100644 --- a/llvm/test/CodeGen/X86/cpus-amd.ll +++ b/llvm/test/CodeGen/X86/cpus-amd.ll @@ -29,6 +29,7 @@ ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver3 2>&1 | FileCheck %s 
--check-prefix=CHECK-NO-ERROR --allow-empty ; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver4 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty +; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver5 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty define void @foo() { ret void diff --git a/llvm/test/CodeGen/X86/rdpru.ll b/llvm/test/CodeGen/X86/rdpru.ll index 7771f52653cb..be79a4499a33 100644 --- a/llvm/test/CodeGen/X86/rdpru.ll +++ b/llvm/test/CodeGen/X86/rdpru.ll @@ -6,6 +6,7 @@ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver2 | FileCheck %s --check-prefix=X64 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 -fast-isel | FileCheck %s --check-prefix=X64 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 -fast-isel | FileCheck %s --check-prefix=X64 +; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver5 -fast-isel | FileCheck %s --check-prefix=X64 define void @rdpru_asm() { ; X86-LABEL: rdpru_asm: diff --git a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll index e89197f5b42c..9c8729b3ea50 100644 --- a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll +++ b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll @@ -3,6 +3,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=icelake-server | FileCheck %s --check-prefixes=CHECK,CHECK-ICX ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-V4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 define <4 x i32> @shuf_rot_v4i32_1032(<4 x i32> %x) { diff --git a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll index d74d195439bd..ceef3fb4bb18 100644 --- a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll +++ b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll @@ -50,6 +50,7 @@ ; RUN: 
llc < %s -mtriple=i386-unknown-unknown -mcpu=znver2 2>&1 | FileCheck %s --check-prefixes=FAST,FAST-AVX256 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=znver3 2>&1 | FileCheck %s --check-prefixes=FAST,FAST-AVX256 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=znver4 2>&1 | FileCheck %s --check-prefixes=FAST,FAST-AVX512 +; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=znver5 2>&1 | FileCheck %s --check-prefixes=FAST,FAST-AVX512 ; Other chips with slow unaligned memory accesses diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll index 9f2071ff14b8..2b78a70ebcc2 100644 --- a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll +++ b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll @@ -6,6 +6,7 @@ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver1 | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR +; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver5 | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR ; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s --check-prefixes=X86-64 define float @f32_no_daz(float %f) #0 { diff --git a/llvm/test/CodeGen/X86/tuning-shuffle-permilpd-avx512.ll b/llvm/test/CodeGen/X86/tuning-shuffle-permilpd-avx512.ll index 7d8bb567c09b..162ab71fc00d 100644 --- a/llvm/test/CodeGen/X86/tuning-shuffle-permilpd-avx512.ll +++ b/llvm/test/CodeGen/X86/tuning-shuffle-permilpd-avx512.ll @@ -4,6 +4,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-V4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 +; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 define <8 x double> @transform_VPERMILPSZrr(<8 x double> %a) nounwind { ; CHECK-LABEL: transform_VPERMILPSZrr: diff --git a/llvm/test/CodeGen/X86/tuning-shuffle-permilps-avx512.ll b/llvm/test/CodeGen/X86/tuning-shuffle-permilps-avx512.ll index 5d031f6017c7..cd97946da248 100644 --- a/llvm/test/CodeGen/X86/tuning-shuffle-permilps-avx512.ll +++ b/llvm/test/CodeGen/X86/tuning-shuffle-permilps-avx512.ll @@ -4,6 +4,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-V4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 define <16 x float> @transform_VPERMILPSZrr(<16 x float> %a) nounwind { ; CHECK-LABEL: transform_VPERMILPSZrr: diff --git a/llvm/test/CodeGen/X86/tuning-shuffle-unpckpd-avx512.ll b/llvm/test/CodeGen/X86/tuning-shuffle-unpckpd-avx512.ll index 4a160bc9debc..5ea991f85523 100644 --- a/llvm/test/CodeGen/X86/tuning-shuffle-unpckpd-avx512.ll +++ b/llvm/test/CodeGen/X86/tuning-shuffle-unpckpd-avx512.ll @@ -5,6 +5,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-V4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 define <16 x float> @transform_VUNPCKLPDZrr(<16 x float> %a, <16 x float> %b) nounwind { diff --git 
a/llvm/test/CodeGen/X86/tuning-shuffle-unpckps-avx512.ll b/llvm/test/CodeGen/X86/tuning-shuffle-unpckps-avx512.ll index d0e3ad9b1908..96155f0300d2 100644 --- a/llvm/test/CodeGen/X86/tuning-shuffle-unpckps-avx512.ll +++ b/llvm/test/CodeGen/X86/tuning-shuffle-unpckps-avx512.ll @@ -5,6 +5,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-V4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK-AVX512 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=CHECK,CHECK-ZNVER4 define <16 x float> @transform_VUNPCKLPSZrr(<16 x float> %a, <16 x float> %b) nounwind { ; CHECK-LABEL: transform_VUNPCKLPSZrr: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-fast-per-lane.ll b/llvm/test/CodeGen/X86/vector-shuffle-fast-per-lane.ll index e59532d4fef3..4021b1bf292b 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-fast-per-lane.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-fast-per-lane.ll @@ -8,6 +8,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=znver2 | FileCheck %s --check-prefixes=FAST ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=znver3 | FileCheck %s --check-prefixes=FAST ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=znver4 | FileCheck %s --check-prefixes=FAST +; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=znver5 | FileCheck %s --check-prefixes=FAST ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=haswell | FileCheck %s --check-prefixes=FAST ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx | FileCheck %s --check-prefixes=FAST diff --git a/llvm/test/CodeGen/X86/vpdpwssd.ll b/llvm/test/CodeGen/X86/vpdpwssd.ll index e6a07b4aeb27..3c1eb92e9e3c 100644 --- a/llvm/test/CodeGen/X86/vpdpwssd.ll +++ b/llvm/test/CodeGen/X86/vpdpwssd.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver4 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver5 | FileCheck %s ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+fast-dpwssd | FileCheck %s define <16 x i32> @vpdpwssd_test(<16 x i32> %0, <16 x i32> %1, <16 x i32> %2) { diff --git a/llvm/test/CodeGen/X86/x86-64-double-shifts-var.ll b/llvm/test/CodeGen/X86/x86-64-double-shifts-var.ll index af6fbdc9f60d..bbaa41492470 100644 --- a/llvm/test/CodeGen/X86/x86-64-double-shifts-var.ll +++ b/llvm/test/CodeGen/X86/x86-64-double-shifts-var.ll @@ -16,6 +16,7 @@ ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver2 | FileCheck %s ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3 | FileCheck %s ; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver5 | FileCheck %s ; Verify that for the X86_64 processors that are known to have poor latency ; double precision shift instructions we do not generate 'shld' or 'shrd' diff --git a/llvm/test/MC/X86/x86_long_nop.s b/llvm/test/MC/X86/x86_long_nop.s index 6136c3db9a3d..b79403bb5f1e 100644 --- a/llvm/test/MC/X86/x86_long_nop.s +++ b/llvm/test/MC/X86/x86_long_nop.s @@ -19,6 +19,8 @@ # RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu %s -mcpu=znver3 | llvm-objdump -d --no-show-raw-insn - | FileCheck %s --check-prefix=LNOP15 # RUN: llvm-mc -filetype=obj -arch=x86 -triple=x86_64-pc-linux-gnu -mcpu=znver4 %s | llvm-objdump -d --no-show-raw-insn - | FileCheck %s --check-prefix=LNOP15 # RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu %s -mcpu=znver4 | llvm-objdump -d --no-show-raw-insn - | FileCheck %s --check-prefix=LNOP15 +# RUN: llvm-mc -filetype=obj -arch=x86 -triple=x86_64-pc-linux-gnu -mcpu=znver5 %s | llvm-objdump -d --no-show-raw-insn - | FileCheck %s --check-prefix=LNOP15 +# RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu %s -mcpu=znver5 | 
llvm-objdump -d --no-show-raw-insn - | FileCheck %s --check-prefix=LNOP15 # RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu -mcpu=nehalem %s | llvm-objdump -d --no-show-raw-insn - | FileCheck --check-prefix=LNOP10 %s # RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu -mcpu=westmere %s | llvm-objdump -d --no-show-raw-insn - | FileCheck --check-prefix=LNOP10 %s # RUN: llvm-mc -filetype=obj -arch=x86 -triple=i686-pc-linux-gnu -mcpu=sandybridge %s | llvm-objdump -d --no-show-raw-insn - | FileCheck --check-prefix=LNOP15 %s diff --git a/llvm/test/Transforms/LoopUnroll/X86/call-remark.ll b/llvm/test/Transforms/LoopUnroll/X86/call-remark.ll index abdcfcf7e074..b05994ddfa35 100644 --- a/llvm/test/Transforms/LoopUnroll/X86/call-remark.ll +++ b/llvm/test/Transforms/LoopUnroll/X86/call-remark.ll @@ -1,6 +1,7 @@ ; RUN: opt -passes=debugify,loop-unroll -mcpu=znver3 -pass-remarks=loop-unroll -pass-remarks-analysis=loop-unroll < %s -S 2>&1 | FileCheck --check-prefixes=ALL,UNROLL %s ; RUN: opt -passes=debugify,loop-unroll -mcpu=znver3 -pass-remarks=TTI -pass-remarks-analysis=TTI < %s -S 2>&1 | FileCheck --check-prefixes=ALL,TTI %s ; RUN: opt -passes=debugify,loop-unroll -mcpu=znver4 -pass-remarks=loop-unroll -pass-remarks-analysis=loop-unroll < %s -S 2>&1 | FileCheck --check-prefixes=ALL,UNROLL %s +; RUN: opt -passes=debugify,loop-unroll -mcpu=znver5 -pass-remarks=loop-unroll -pass-remarks-analysis=loop-unroll < %s -S 2>&1 | FileCheck --check-prefixes=ALL,UNROLL %s ; RUN: opt -passes=debugify,loop-unroll -mcpu=znver3 -pass-remarks=loop-unroll -pass-remarks-analysis=loop-unroll < %s -S 2>&1 --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=ALL,UNROLL %s diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr63668.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr63668.ll index 391771e06cab..037e073de9d5 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/pr63668.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/pr63668.ll @@ -1,5 +1,6 @@ ; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3 ; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=znver4 -S < %s | FileCheck %s +; RUN: opt -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=znver5 -S < %s | FileCheck %s define internal i32 @testfunc() { ; CHECK-LABEL: define internal i32 @testfunc From bdae3c487cbb2b4161e7fbb54a855f0ba55da61a Mon Sep 17 00:00:00 2001 From: Zaara Syeda Date: Tue, 10 Sep 2024 14:14:01 -0400 Subject: [PATCH 26/29] [PowerPC] Fix assert exposed by PR 95931 in LowerBITCAST (#108062) Hit Assertion failed: Num < NumOperands && "Invalid child # of SDNode!" Fix by checking opcode and value type before calling getOperand. (cherry picked from commit 22067a8eb43a7194e65913b47a9c724fde3ed68f) --- llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 9 +++++---- llvm/test/CodeGen/PowerPC/f128-bitcast.ll | 22 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 21cf4d9eeac1..758de9d732fa 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -9338,12 +9338,13 @@ SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); SDValue Op0 = Op->getOperand(0); + if (!Subtarget.isPPC64() || (Op0.getOpcode() != ISD::BUILD_PAIR) || + (Op.getValueType() != MVT::f128)) + return SDValue(); + SDValue Lo = Op0.getOperand(0); SDValue Hi = Op0.getOperand(1); - - if ((Op.getValueType() != MVT::f128) || - (Op0.getOpcode() != ISD::BUILD_PAIR) || (Lo.getValueType() != MVT::i64) || - (Hi.getValueType() != MVT::i64) || !Subtarget.isPPC64()) + if ((Lo.getValueType() != MVT::i64) || (Hi.getValueType() != MVT::i64)) return SDValue(); if (!Subtarget.isLittleEndian()) diff --git a/llvm/test/CodeGen/PowerPC/f128-bitcast.ll b/llvm/test/CodeGen/PowerPC/f128-bitcast.ll index 
ffbfbd0c64ff..55ba3cb1e053 100644 --- a/llvm/test/CodeGen/PowerPC/f128-bitcast.ll +++ b/llvm/test/CodeGen/PowerPC/f128-bitcast.ll @@ -86,3 +86,25 @@ entry: ret i64 %1 } +define <4 x i32> @truncBitcast(i512 %a) { +; CHECK-LABEL: truncBitcast: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrdd v2, r4, r3 +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: truncBitcast: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrdd v2, r9, r10 +; CHECK-BE-NEXT: blr +; +; CHECK-P8-LABEL: truncBitcast: +; CHECK-P8: # %bb.0: # %entry +; CHECK-P8-NEXT: mtfprd f0, r3 +; CHECK-P8-NEXT: mtfprd f1, r4 +; CHECK-P8-NEXT: xxmrghd v2, vs1, vs0 +; CHECK-P8-NEXT: blr +entry: + %0 = trunc i512 %a to i128 + %1 = bitcast i128 %0 to <4 x i32> + ret <4 x i32> %1 +} From bd4ff65a601895ba816623cddb36ce466cceabe6 Mon Sep 17 00:00:00 2001 From: Tobias Hieta Date: Tue, 17 Sep 2024 09:39:01 +0200 Subject: [PATCH 27/29] Revert "[LoongArch] Eliminate the redundant sign extension of division (#107971)" This reverts commit d752f29fb333d47724484e08b32d6499cc1e460e. --- llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp index ab90409fdf47..abac69054f3b 100644 --- a/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp @@ -637,19 +637,6 @@ static bool isSignExtendedW(Register SrcReg, const LoongArchSubtarget &ST, break; } return false; - // If all incoming values are sign-extended and all users only use - // the lower 32 bits, then convert them to W versions. 
- case LoongArch::DIV_D: { - if (!AddRegToWorkList(MI->getOperand(1).getReg())) - return false; - if (!AddRegToWorkList(MI->getOperand(2).getReg())) - return false; - if (hasAllWUsers(*MI, ST, MRI)) { - FixableDef.insert(MI); - break; - } - return false; - } } } @@ -664,8 +651,6 @@ static unsigned getWOp(unsigned Opcode) { return LoongArch::ADDI_W; case LoongArch::ADD_D: return LoongArch::ADD_W; - case LoongArch::DIV_D: - return LoongArch::DIV_W; case LoongArch::LD_D: case LoongArch::LD_WU: return LoongArch::LD_W; From 560ed047d183348b341ffd4e27712c254d82f589 Mon Sep 17 00:00:00 2001 From: Tobias Hieta Date: Tue, 17 Sep 2024 09:39:18 +0200 Subject: [PATCH 28/29] Revert " [LoongArch][ISel] Check the number of sign bits in `PatGprGpr_32` (#107432)" This reverts commit 78654faa0c6d9dc2f72b81953b9cffbb7675755b. --- .../Target/LoongArch/LoongArchInstrInfo.td | 5 +- .../ir-instruction/sdiv-udiv-srem-urem.ll | 67 +------------------ 2 files changed, 3 insertions(+), 69 deletions(-) diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td index 339d50bd8192..ef647a427787 100644 --- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td @@ -1065,13 +1065,10 @@ def RDTIME_D : RDTIME_2R<0x00006800>; /// Generic pattern classes -def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{ - return cast(N->getOperand(1))->getVT().bitsLE(MVT::i32); -}]>; class PatGprGpr : Pat<(OpNode GPR:$rj, GPR:$rk), (Inst GPR:$rj, GPR:$rk)>; class PatGprGpr_32 - : Pat<(sext_inreg (OpNode (assertsexti32 GPR:$rj), (assertsexti32 GPR:$rk)), i32), (Inst GPR:$rj, GPR:$rk)>; + : Pat<(sext_inreg (OpNode GPR:$rj, GPR:$rk), i32), (Inst GPR:$rj, GPR:$rk)>; class PatGpr : Pat<(OpNode GPR:$rj), (Inst GPR:$rj)>; diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll index c22acdb49690..ab3eec240db3 
100644 --- a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll +++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll @@ -191,8 +191,7 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) { ; LA64: # %bb.0: # %entry ; LA64-NEXT: addi.w $a1, $a1, 0 ; LA64-NEXT: addi.w $a0, $a0, 0 -; LA64-NEXT: div.d $a0, $a0, $a1 -; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: div.w $a0, $a0, $a1 ; LA64-NEXT: ret ; ; LA32-TRAP-LABEL: sdiv_si32_ui32_ui32: @@ -208,12 +207,11 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) { ; LA64-TRAP: # %bb.0: # %entry ; LA64-TRAP-NEXT: addi.w $a1, $a1, 0 ; LA64-TRAP-NEXT: addi.w $a0, $a0, 0 -; LA64-TRAP-NEXT: div.d $a0, $a0, $a1 +; LA64-TRAP-NEXT: div.w $a0, $a0, $a1 ; LA64-TRAP-NEXT: bnez $a1, .LBB5_2 ; LA64-TRAP-NEXT: # %bb.1: # %entry ; LA64-TRAP-NEXT: break 7 ; LA64-TRAP-NEXT: .LBB5_2: # %entry -; LA64-TRAP-NEXT: addi.w $a0, $a0, 0 ; LA64-TRAP-NEXT: ret entry: %r = sdiv i32 %a, %b @@ -1153,64 +1151,3 @@ entry: %r = urem i64 %a, %b ret i64 %r } - -define signext i32 @pr107414(i32 signext %x) { -; LA32-LABEL: pr107414: -; LA32: # %bb.0: # %entry -; LA32-NEXT: addi.w $sp, $sp, -16 -; LA32-NEXT: .cfi_def_cfa_offset 16 -; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill -; LA32-NEXT: .cfi_offset 1, -4 -; LA32-NEXT: move $a2, $a0 -; LA32-NEXT: srai.w $a3, $a0, 31 -; LA32-NEXT: lu12i.w $a0, -266831 -; LA32-NEXT: ori $a0, $a0, 3337 -; LA32-NEXT: move $a1, $zero -; LA32-NEXT: bl %plt(__divdi3) -; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload -; LA32-NEXT: addi.w $sp, $sp, 16 -; LA32-NEXT: ret -; -; LA64-LABEL: pr107414: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu12i.w $a1, -266831 -; LA64-NEXT: ori $a1, $a1, 3337 -; LA64-NEXT: lu32i.d $a1, 0 -; LA64-NEXT: div.d $a0, $a1, $a0 -; LA64-NEXT: addi.w $a0, $a0, 0 -; LA64-NEXT: ret -; -; LA32-TRAP-LABEL: pr107414: -; LA32-TRAP: # %bb.0: # %entry -; LA32-TRAP-NEXT: addi.w $sp, $sp, -16 -; LA32-TRAP-NEXT: .cfi_def_cfa_offset 16 -; LA32-TRAP-NEXT: st.w $ra, 
$sp, 12 # 4-byte Folded Spill -; LA32-TRAP-NEXT: .cfi_offset 1, -4 -; LA32-TRAP-NEXT: move $a2, $a0 -; LA32-TRAP-NEXT: srai.w $a3, $a0, 31 -; LA32-TRAP-NEXT: lu12i.w $a0, -266831 -; LA32-TRAP-NEXT: ori $a0, $a0, 3337 -; LA32-TRAP-NEXT: move $a1, $zero -; LA32-TRAP-NEXT: bl %plt(__divdi3) -; LA32-TRAP-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload -; LA32-TRAP-NEXT: addi.w $sp, $sp, 16 -; LA32-TRAP-NEXT: ret -; -; LA64-TRAP-LABEL: pr107414: -; LA64-TRAP: # %bb.0: # %entry -; LA64-TRAP-NEXT: lu12i.w $a1, -266831 -; LA64-TRAP-NEXT: ori $a1, $a1, 3337 -; LA64-TRAP-NEXT: lu32i.d $a1, 0 -; LA64-TRAP-NEXT: div.d $a1, $a1, $a0 -; LA64-TRAP-NEXT: bnez $a0, .LBB32_2 -; LA64-TRAP-NEXT: # %bb.1: # %entry -; LA64-TRAP-NEXT: break 7 -; LA64-TRAP-NEXT: .LBB32_2: # %entry -; LA64-TRAP-NEXT: addi.w $a0, $a1, 0 -; LA64-TRAP-NEXT: ret -entry: - %conv = sext i32 %x to i64 - %div = sdiv i64 3202030857, %conv - %conv1 = trunc i64 %div to i32 - ret i32 %conv1 -} From a4bf6cd7cfb1a1421ba92bca9d017b49936c55e4 Mon Sep 17 00:00:00 2001 From: Tobias Hieta Date: Tue, 17 Sep 2024 13:26:36 +0200 Subject: [PATCH 29/29] Bump version to 19.1.0 (final) --- cmake/Modules/LLVMVersion.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Modules/LLVMVersion.cmake b/cmake/Modules/LLVMVersion.cmake index 7e3bb98c3577..928c6c439bd1 100644 --- a/cmake/Modules/LLVMVersion.cmake +++ b/cmake/Modules/LLVMVersion.cmake @@ -10,6 +10,6 @@ if(NOT DEFINED LLVM_VERSION_PATCH) set(LLVM_VERSION_PATCH 0) endif() if(NOT DEFINED LLVM_VERSION_SUFFIX) - set(LLVM_VERSION_SUFFIX -rc4) + set(LLVM_VERSION_SUFFIX) endif()