[LV][EVL] Support call instruction with EVL-vectorization

LiqinWeng committed Oct 16, 2024
1 parent fcf02bc commit 177de6d
Showing 14 changed files with 115 additions and 31 deletions.
6 changes: 6 additions & 0 deletions llvm/include/llvm/Analysis/VectorUtils.h
@@ -160,6 +160,12 @@ bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx);
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI);

/// Returns the corresponding VP intrinsic ID for a call.
/// For the given call instruction, this finds the matching intrinsic and
/// returns its VP intrinsic ID; if none is found, not_intrinsic is returned.
Intrinsic::ID getVPIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI);

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
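A minimal usage sketch of the new helper (illustrative only, not part of this diff; CI and TLI stand in for a CallInst * and a const TargetLibraryInfo * from the caller's context):

// Map a call such as llvm.smax to its VP counterpart, e.g. Intrinsic::vp_smax.
Intrinsic::ID VPID = getVPIntrinsicIDForCall(CI, TLI);
if (VPID == Intrinsic::not_intrinsic) {
  // No VP mapping exists for this call; keep the existing non-EVL widening path.
}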
4 changes: 2 additions & 2 deletions llvm/include/llvm/IR/VectorBuilder.h
@@ -99,11 +99,11 @@ class VectorBuilder {
const Twine &Name = Twine());

/// Emit a VP reduction intrinsic call for recurrence kind.
/// \param RdxID The intrinsic ID of llvm.vector.reduce.*
/// \param ID The intrinsic ID of the call intrinsic.
/// \param ValTy The type of the operand on which the reduction operation is
/// performed.
/// \param VecOpArray The operand list.
Value *createSimpleReduction(Intrinsic::ID RdxID, Type *ValTy,
Value *createSimpleIntrinsic(Intrinsic::ID RdxID, Type *ValTy,
ArrayRef<Value *> VecOpArray,
const Twine &Name = Twine());
};
7 changes: 7 additions & 0 deletions llvm/lib/Analysis/VectorUtils.cpp
@@ -169,6 +169,13 @@ Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
return Intrinsic::not_intrinsic;
}

Intrinsic::ID llvm::getVPIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI) {
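// Resolve the call to its functional intrinsic ID first, then map that ID to
// the corresponding VP intrinsic; both steps yield not_intrinsic on failure.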
Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);

return VPIntrinsic::getForIntrinsic(ID);
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
9 changes: 4 additions & 5 deletions llvm/lib/IR/VectorBuilder.cpp
@@ -60,13 +60,12 @@ Value *VectorBuilder::createVectorInstruction(unsigned Opcode, Type *ReturnTy,
return createVectorInstructionImpl(VPID, ReturnTy, InstOpArray, Name);
}

Value *VectorBuilder::createSimpleReduction(Intrinsic::ID RdxID,
Type *ValTy,
Value *VectorBuilder::createSimpleIntrinsic(Intrinsic::ID ID, Type *ValTy,
ArrayRef<Value *> InstOpArray,
const Twine &Name) {
auto VPID = VPIntrinsic::getForIntrinsic(RdxID);
assert(VPReductionIntrinsic::isVPReduction(VPID) &&
"No VPIntrinsic for this reduction");
auto VPID = VPIntrinsic::getForIntrinsic(ID);
assert(VPIntrinsic::isVPIntrinsic(VPID) &&
"No VPIntrinsic for this Intrinsic");
return createVectorInstructionImpl(VPID, ValTy, InstOpArray, Name);
}

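A hedged sketch of how the renamed helper is driven (mirroring the recipe code later in this patch; Builder, VF, EVL, RetTy, LHS and RHS stand in for values available at the call site):

// Emit llvm.vp.smax from its functional ID, with an all-true mask and an EVL.
VectorBuilder VBuilder(Builder);                  // Builder is an IRBuilderBase
Value *Mask = Builder.CreateVectorSplat(VF, Builder.getTrue());
VBuilder.setMask(Mask).setEVL(EVL);               // EVL is an i32 Value *
Value *VPCall =
    VBuilder.createSimpleIntrinsic(Intrinsic::smax, RetTy, {LHS, RHS}, "vp.call");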
8 changes: 8 additions & 0 deletions llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1075,6 +1075,14 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
return getArithmeticInstrCost(*FOp, ICA.getReturnType(), CostKind);
break;
}
// TODO: Needs a follow-up patch for proper cost modeling.
case Intrinsic::vp_smax:
case Intrinsic::vp_smin:
case Intrinsic::vp_umax:
case Intrinsic::vp_umin: {
// return LT.first;
return 1;
}
// vp int cast ops.
case Intrinsic::vp_trunc:
case Intrinsic::vp_zext:
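Illustrative only (not part of the patch): a cost query that exercises the new cases, assuming an LLVMContext Ctx and a TargetTransformInfo TTI from the surrounding code. The RISC-V hook currently returns the placeholder cost of 1 for vp.smax/smin/umax/umin:

// Query the cost of llvm.vp.smax on <vscale x 4 x i32>.
Type *VecTy = ScalableVectorType::get(Type::getInt32Ty(Ctx), 4);
IntrinsicCostAttributes ICA(Intrinsic::vp_smax, VecTy, {VecTy, VecTy});
InstructionCost Cost =
    TTI.getIntrinsicInstrCost(ICA, TargetTransformInfo::TCK_RecipThroughput);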
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -1300,7 +1300,7 @@ Value *llvm::createSimpleReduction(VectorBuilder &VBuilder, Value *Src,
Type *SrcEltTy = SrcTy->getElementType();
Value *Iden = getRecurrenceIdentity(Kind, SrcEltTy, Desc.getFastMathFlags());
Value *Ops[] = {Iden, Src};
return VBuilder.createSimpleReduction(Id, SrcTy, Ops);
return VBuilder.createSimpleIntrinsic(Id, SrcTy, Ops);
}

Value *llvm::createReduction(IRBuilderBase &B,
@@ -1343,7 +1343,7 @@ Value *llvm::createOrderedReduction(VectorBuilder &VBuilder,
Intrinsic::ID Id = getReductionIntrinsicID(RecurKind::FAdd);
auto *SrcTy = cast<VectorType>(Src->getType());
Value *Ops[] = {Start, Src};
return VBuilder.createSimpleReduction(Id, SrcTy, Ops);
return VBuilder.createSimpleIntrinsic(Id, SrcTy, Ops);
}

void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue,
3 changes: 1 addition & 2 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8349,7 +8349,6 @@ VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
return nullptr;

SmallVector<VPValue *, 4> Ops(Operands.take_front(CI->arg_size()));

// Is it beneficial to perform intrinsic call compared to lib call?
bool ShouldUseVectorIntrinsic =
ID && LoopVectorizationPlanner::getDecisionAndClampRange(
@@ -8690,7 +8689,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
// TODO: try to put it close to addActiveLaneMask().
// Discard the plan if it is not EVL-compatible
if (CM.foldTailWithEVL() &&
!VPlanTransforms::tryAddExplicitVectorLength(*Plan))
!VPlanTransforms::tryAddExplicitVectorLength(*Plan, *TLI))
break;
assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
VPlans.push_back(std::move(Plan));
15 changes: 15 additions & 0 deletions llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1678,9 +1678,24 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
/// Returns true if the intrinsic may write to memory.
bool mayWriteToMemory() const { return MayWriteToMemory; }

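/// Return the intrinsic's argument operands; for VP intrinsics this excludes
/// the trailing EVL operand appended to the recipe.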
operand_range arg_operands() {
unsigned ArgNum = VPIntrinsic::isVPIntrinsic(VectorIntrinsicID)
? getNumOperands() - 1
: getNumOperands();
return make_range(op_begin(), op_begin() + ArgNum);
}

const_operand_range arg_operands() const {
unsigned ArgNum = VPIntrinsic::isVPIntrinsic(VectorIntrinsicID)
? getNumOperands() - 1
: getNumOperands();
return make_range(op_begin(), op_begin() + ArgNum);
}

/// Returns true if the intrinsic may have side-effects.
bool mayHaveSideEffects() const { return MayHaveSideEffects; }

bool onlyFirstLaneUsed(const VPValue *Op) const override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
2 changes: 2 additions & 0 deletions llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -61,6 +61,8 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
case Instruction::ICmp:
case VPInstruction::ActiveLaneMask:
return inferScalarType(R->getOperand(1));
case VPInstruction::ExplicitVectorLength:
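// The explicit vector length is always a 32-bit integer, matching the EVL
// operand type of the VP intrinsics.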
return Type::getIntNTy(Ctx, 32);
case VPInstruction::FirstOrderRecurrenceSplice:
case VPInstruction::Not:
return SetResultTyFromOp();
52 changes: 40 additions & 12 deletions llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -962,17 +962,21 @@ void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "not widening");
State.setDebugLocFrom(getDebugLoc());

Intrinsic::ID FuncID =
VPIntrinsic::isVPIntrinsic(VectorIntrinsicID)
? VPIntrinsic::getFunctionalIntrinsicIDForVP(VectorIntrinsicID)
.value()
: VectorIntrinsicID;
SmallVector<Type *, 2> TysForDecl;
// Add return type if intrinsic is overloaded on it.
if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1))
if (isVectorIntrinsicWithOverloadTypeAtArg(FuncID, -1))
TysForDecl.push_back(VectorType::get(getResultType(), State.VF));
SmallVector<Value *, 4> Args;
for (const auto &I : enumerate(operands())) {
for (const auto &I : enumerate(arg_operands())) {
// Some intrinsics have a scalar argument - don't replace it with a
// vector.
Value *Arg;
if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
if (isVectorIntrinsicWithScalarOpAtArg(FuncID, I.index()))
Arg = State.get(I.value(), VPLane(0));
else
Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));
@@ -981,18 +985,34 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
Args.push_back(Arg);
}

// Use vector version of the intrinsic.
Module *M = State.Builder.GetInsertBlock()->getModule();
Function *VectorF =
Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
assert(VectorF && "Can't retrieve vector intrinsic.");

CallInst *V = nullptr;
auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
SmallVector<OperandBundleDef, 1> OpBundles;
if (CI)
CI->getOperandBundlesAsDefs(OpBundles);

CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
if (VPIntrinsic::isVPIntrinsic(VectorIntrinsicID)) {
// Use the vector-predicated (VP) version of the intrinsic.
IRBuilderBase &BuilderIR = State.Builder;
VectorBuilder VBuilder(BuilderIR);
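// With EVL tail folding the number of processed lanes is carried by the EVL
// operand, so an all-true mask is sufficient here.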
Value *Mask = BuilderIR.CreateVectorSplat(State.VF, BuilderIR.getTrue());
// The EVL is the recipe's trailing operand.
VBuilder.setMask(Mask).setEVL(
State.get(getOperand(getNumOperands() - 1), /*NeedsScalar=*/true));
auto *TyReturn = VectorType::get(getResultType(), State.VF);
Value *VPInst = VBuilder.createSimpleIntrinsic(VectorIntrinsicID, TyReturn,
Args, "vp.call");
if (VPInst) {
V = cast<CallInst>(VPInst);
}
} else {
// Use vector version of the intrinsic.
Module *M = State.Builder.GetInsertBlock()->getModule();
Function *VectorF =
Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
assert(VectorF && "Can't retrieve vector intrinsic.");
V = State.Builder.CreateCall(VectorF, Args, OpBundles);
}

setFlags(V);

Expand All @@ -1011,7 +1031,7 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
// clear Arguments.
// TODO: Rework TTI interface to be independent of concrete IR values.
SmallVector<const Value *> Arguments;
for (const auto &[Idx, Op] : enumerate(operands())) {
for (const auto &[Idx, Op] : enumerate(arg_operands())) {
auto *V = Op->getUnderlyingValue();
if (!V) {
if (auto *UI = dyn_cast_or_null<CallBase>(getUnderlyingValue())) {
@@ -1042,6 +1062,14 @@ StringRef VPWidenIntrinsicRecipe::getIntrinsicName() const {
return Intrinsic::getBaseName(VectorIntrinsicID);
}

bool VPWidenIntrinsicRecipe::onlyFirstLaneUsed(const VPValue *Op) const {
assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
// Vector predication intrinsics only demand the first lane of the last
// operand (the EVL operand).
return VPIntrinsic::isVPIntrinsic(VectorIntrinsicID) &&
Op == getOperand(getNumOperands() - 1);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPWidenIntrinsicRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
21 changes: 18 additions & 3 deletions llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1351,8 +1351,10 @@ void VPlanTransforms::addActiveLaneMask(
}

/// Replace recipes with their EVL variants.
static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL,
const TargetLibraryInfo &TLI) {
SmallVector<VPValue *> HeaderMasks = collectAllHeaderMasks(Plan);
VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType());
for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
for (VPUser *U : collectUsersRecursively(HeaderMask)) {
auto *CurRecipe = dyn_cast<VPRecipeBase>(U);
@@ -1380,6 +1382,18 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
return nullptr;
return new VPWidenEVLRecipe(*W, EVL);
})
.Case<VPWidenIntrinsicRecipe>(
[&](VPWidenIntrinsicRecipe *CInst) -> VPRecipeBase * {
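// Replace the widened intrinsic by its VP counterpart, with EVL appended
// as the trailing operand; bail out if no VP mapping exists.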
auto *CI = cast<CallInst>(CInst->getUnderlyingInstr());
SmallVector<VPValue *> Ops(CInst->operands());
Ops.push_back(&EVL);
Intrinsic::ID VPID = getVPIntrinsicIDForCall(CI, &TLI);
if (VPID == Intrinsic::not_intrinsic)
return nullptr;
return new VPWidenIntrinsicRecipe(
*CI, VPID, Ops, CI->getType(), CI->getDebugLoc());
})
.Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
VPValue *NewMask = GetNewMask(Red->getCondOp());
return new VPReductionEVLRecipe(*Red, EVL, NewMask);
@@ -1430,7 +1444,8 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
/// ...
///
bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan) {
bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan,
const TargetLibraryInfo &TLI) {
VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
// The transform updates all users of inductions to work based on EVL, instead
// of the VF directly. At the moment, widened inductions cannot be updated, so
@@ -1482,7 +1497,7 @@ bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan) {
NextEVLIV->insertBefore(CanonicalIVIncrement);
EVLPhi->addOperand(NextEVLIV);

transformRecipestoEVLRecipes(Plan, *VPEVL);
transformRecipestoEVLRecipes(Plan, *VPEVL, TLI);

// Replace all uses of VPCanonicalIVPHIRecipe by
// VPEVLBasedIVPHIRecipe except for the canonical IV increment.
3 changes: 2 additions & 1 deletion llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -108,7 +108,8 @@ struct VPlanTransforms {
/// VPCanonicalIVPHIRecipe is only used to control the loop after
/// this transformation.
/// \returns true if the transformation succeeds, or false if it doesn't.
static bool tryAddExplicitVectorLength(VPlan &Plan);
static bool tryAddExplicitVectorLength(VPlan &Plan,
const TargetLibraryInfo &TLI);

// For each Interleave Group in \p InterleaveGroups replace the Recipes
// widening its memory instructions with a single VPInterleaveRecipe at its
4 changes: 4 additions & 0 deletions llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -138,6 +138,10 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
};
for (const VPUser *U : EVL.users()) {
if (!TypeSwitch<const VPUser *, bool>(U)
.Case<VPWidenIntrinsicRecipe>(
[&](const VPWidenIntrinsicRecipe *S) {
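// The EVL must be the trailing operand of the widened VP intrinsic.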
return VerifyEVLUse(*S, S->getNumOperands() - 1);
})
.Case<VPWidenStoreEVLRecipe>([&](const VPWidenStoreEVLRecipe *S) {
return VerifyEVLUse(*S, 2);
})
@@ -27,7 +27,7 @@ define void @vp_smax(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMAX:%.+]]> = call llvm.smax(ir<[[LD1]]>, ir<[[LD2]]>)
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMAX:%.+]]> = call llvm.vp.smax(ir<[[LD1]]>, ir<[[LD2]]>, vp<[[EVL]]>)
; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]>
@@ -80,7 +80,7 @@ define void @vp_smin(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMIN:%.+]]> = call llvm.smin(ir<[[LD1]]>, ir<[[LD2]]>)
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[SMIN:%.+]]> = call llvm.vp.smin(ir<[[LD1]]>, ir<[[LD2]]>, vp<[[EVL]]>)
; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]>
@@ -133,7 +133,7 @@ define void @vp_umax(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMAX:%.+]]> = call llvm.umax(ir<[[LD1]]>, ir<[[LD2]]>)
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMAX:%.+]]> = call llvm.vp.umax(ir<[[LD1]]>, ir<[[LD2]]>, vp<[[EVL]]>)
; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]>
@@ -186,7 +186,7 @@ define void @vp_umin(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = vp.load vp<[[PTR2]]>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMIN:%.+]]> = call llvm.umin(ir<[[LD1]]>, ir<[[LD2]]>)
; IF-EVL-NEXT: WIDEN-INTRINSIC ir<[[UMIN:%.+]]> = call llvm.vp.umin(ir<[[LD1]]>, ir<[[LD2]]>, vp<[[EVL]]>)
; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]>
