path: root/contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp
author    shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp | 106
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp b/contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp
index 9a4c96b6f7..1c7a473bcf 100644
--- a/contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp
+++ b/contrib/libs/llvm12/lib/Analysis/VectorUtils.cpp
@@ -43,18 +43,18 @@ static cl::opt<unsigned> MaxInterleaveGroupFactor(
/// hasVectorInstrinsicScalarOpd).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
switch (ID) {
- case Intrinsic::abs: // Begin integer bit-manipulation.
- case Intrinsic::bswap:
+ case Intrinsic::abs: // Begin integer bit-manipulation.
+ case Intrinsic::bswap:
case Intrinsic::bitreverse:
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::fshl:
case Intrinsic::fshr:
- case Intrinsic::smax:
- case Intrinsic::smin:
- case Intrinsic::umax:
- case Intrinsic::umin:
+ case Intrinsic::smax:
+ case Intrinsic::smin:
+ case Intrinsic::umax:
+ case Intrinsic::umin:
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat:
case Intrinsic::uadd_sat:
@@ -99,7 +99,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
switch (ID) {
- case Intrinsic::abs:
+ case Intrinsic::abs:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::powi:
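Taken together, the two predicates above tell a vectorizer how to widen an intrinsic call: the call as a whole must be trivially vectorizable, and any operand flagged by hasVectorInstrinsicScalarOpd has to stay scalar. A minimal sketch of that decision, with a hypothetical helper name and assuming the LLVM 12 headers:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Intrinsics.h"
    #include <cassert>

    // Hypothetical helper: decide, per operand, whether a widened call to ID
    // should receive a vector operand or keep the original scalar one
    // (e.g. the i32 exponent of llvm.powi, operand 1, stays scalar).
    static bool operandNeedsVector(llvm::Intrinsic::ID ID, unsigned OpIdx) {
      assert(llvm::isTriviallyVectorizable(ID) && "checked by the caller");
      return !llvm::hasVectorInstrinsicScalarOpd(ID, OpIdx);
    }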
@@ -125,8 +125,8 @@ Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
- ID == Intrinsic::experimental_noalias_scope_decl ||
- ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
+ ID == Intrinsic::experimental_noalias_scope_decl ||
+ ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
return ID;
return Intrinsic::not_intrinsic;
}
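getVectorIntrinsicIDForCall is the usual entry point for that check: it maps a CallInst (including TLI-recognized library calls) to an intrinsic ID that is either trivially vectorizable or explicitly allowed here, and to not_intrinsic otherwise. A hedged usage sketch, assuming a CallInst and TargetLibraryInfo are already in scope:

    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Instructions.h"

    // Sketch: skip calls the vectorizer cannot treat as an intrinsic.
    static bool isVectorizableCall(const llvm::CallInst *CI,
                                   const llvm::TargetLibraryInfo *TLI) {
      return llvm::getVectorIntrinsicIDForCall(CI, TLI) !=
             llvm::Intrinsic::not_intrinsic;
    }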
@@ -137,7 +137,7 @@ Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
const DataLayout &DL = Gep->getModule()->getDataLayout();
unsigned LastOperand = Gep->getNumOperands() - 1;
- TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
+ TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
// Walk backwards and try to peel off zeros.
while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
@@ -209,7 +209,7 @@ Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
if (Ptr != OrigPtr)
// Strip off casts.
- while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
+ while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
V = C->getOperand();
const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
@@ -242,7 +242,7 @@ Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
// Strip off casts.
Type *StripedOffRecurrenceCast = nullptr;
- if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
+ if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
StripedOffRecurrenceCast = C->getType();
V = C->getOperand();
}
@@ -291,10 +291,10 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
if (EltNo == IIElt)
return III->getOperand(1);
- // Guard against infinite loop on malformed, unreachable IR.
- if (III == III->getOperand(0))
- return nullptr;
-
+ // Guard against infinite loop on malformed, unreachable IR.
+ if (III == III->getOperand(0))
+ return nullptr;
+
// Otherwise, the insertelement doesn't modify the value, recurse on its
// vector input.
return findScalarElement(III->getOperand(0), EltNo);
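The restored guard above matters because findScalarElement recurses through chains of insertelement instructions, and a self-referential insertelement (possible only in unreachable IR) would otherwise recurse forever. The happy path, as a small sketch assuming an IRBuilder and an i32 value X:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/IRBuilder.h"

    // Sketch: insert X into lane 2 of an undef <4 x i32>, then recover it.
    llvm::Value *demoFindScalar(llvm::IRBuilder<> &B, llvm::Value *X) {
      auto *VecTy = llvm::FixedVectorType::get(B.getInt32Ty(), 4);
      llvm::Value *V =
          B.CreateInsertElement(llvm::UndefValue::get(VecTy), X, uint64_t(2));
      return llvm::findScalarElement(V, 2); // yields X
    }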
@@ -347,7 +347,7 @@ int llvm::getSplatIndex(ArrayRef<int> Mask) {
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
-Value *llvm::getSplatValue(const Value *V) {
+Value *llvm::getSplatValue(const Value *V) {
if (isa<VectorType>(V->getType()))
if (auto *C = dyn_cast<Constant>(V))
return C->getSplatValue();
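As the comment notes, case (2) is the canonical insertelement-plus-shufflevector broadcast, which is exactly the pattern IRBuilder::CreateVectorSplat emits. A round-trip sketch under the same assumptions as above:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/IRBuilder.h"

    // Sketch: broadcast X into all <4 x i32> lanes, then recover the scalar.
    llvm::Value *splatRoundTrip(llvm::IRBuilder<> &B, llvm::Value *X) {
      llvm::Value *Splat = B.CreateVectorSplat(4, X); // insertelt + shuffle
      return llvm::getSplatValue(Splat);              // yields X again
    }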
@@ -363,7 +363,7 @@ Value *llvm::getSplatValue(const Value *V) {
}
bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
- assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
+ assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
if (isa<VectorType>(V->getType())) {
if (isa<UndefValue>(V))
@@ -390,7 +390,7 @@ bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
}
// The remaining tests are all recursive, so bail out if we hit the limit.
- if (Depth++ == MaxAnalysisRecursionDepth)
+ if (Depth++ == MaxAnalysisRecursionDepth)
return false;
// If both operands of a binop are splats, the result is a splat.
@@ -421,7 +421,7 @@ void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
ScaledMask.clear();
for (int MaskElt : Mask) {
if (MaskElt >= 0) {
- assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
+ assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
"Overflowed 32-bits");
}
for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
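Concretely, narrowShuffleMaskElts replicates each mask element into Scale consecutive positions of the finer-grained mask (negative sentinel elements are copied as-is), so the assert above only has to guard the largest produced index. A worked sketch:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/VectorUtils.h"

    void demoNarrowMask() {
      llvm::SmallVector<int, 8> Scaled;
      // Scale = 2: the mask <1, 0> over 2-wide elements becomes <2, 3, 0, 1>.
      llvm::narrowShuffleMaskElts(2, {1, 0}, Scaled);
    }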
@@ -823,14 +823,14 @@ static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
VecTy1->getScalarType() == VecTy2->getScalarType() &&
"Expect two vectors with the same element type");
- unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
- unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
+ unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
+ unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements");
if (NumElts1 > NumElts2) {
// Extend with UNDEFs.
V2 = Builder.CreateShuffleVector(
- V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
+ V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
}
return Builder.CreateShuffleVector(
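concatenateTwoVectors is the static worker behind llvm::concatenateVectors (next hunk); when the inputs differ in length, the shorter vector is first widened with undef lanes via the sequential mask shown above. A usage sketch, assuming two <4 x float> values:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/IRBuilder.h"

    // Sketch: splice two <4 x float> values into a single <8 x float>.
    llvm::Value *concatPair(llvm::IRBuilder<> &B, llvm::Value *V1,
                            llvm::Value *V2) {
      return llvm::concatenateVectors(B, {V1, V2});
    }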
@@ -866,22 +866,22 @@ Value *llvm::concatenateVectors(IRBuilderBase &Builder,
}
bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
- assert(isa<VectorType>(Mask->getType()) &&
- isa<IntegerType>(Mask->getType()->getScalarType()) &&
- cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
- 1 &&
- "Mask must be a vector of i1");
-
+ assert(isa<VectorType>(Mask->getType()) &&
+ isa<IntegerType>(Mask->getType()->getScalarType()) &&
+ cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+ 1 &&
+ "Mask must be a vector of i1");
+
auto *ConstMask = dyn_cast<Constant>(Mask);
if (!ConstMask)
return false;
if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
return true;
- if (isa<ScalableVectorType>(ConstMask->getType()))
- return false;
- for (unsigned
- I = 0,
- E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
+ if (isa<ScalableVectorType>(ConstMask->getType()))
+ return false;
+ for (unsigned
+ I = 0,
+ E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
I != E; ++I) {
if (auto *MaskElt = ConstMask->getAggregateElement(I))
if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
@@ -893,22 +893,22 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
bool llvm::maskIsAllOneOrUndef(Value *Mask) {
- assert(isa<VectorType>(Mask->getType()) &&
- isa<IntegerType>(Mask->getType()->getScalarType()) &&
- cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
- 1 &&
- "Mask must be a vector of i1");
-
+ assert(isa<VectorType>(Mask->getType()) &&
+ isa<IntegerType>(Mask->getType()->getScalarType()) &&
+ cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+ 1 &&
+ "Mask must be a vector of i1");
+
auto *ConstMask = dyn_cast<Constant>(Mask);
if (!ConstMask)
return false;
if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
return true;
- if (isa<ScalableVectorType>(ConstMask->getType()))
- return false;
- for (unsigned
- I = 0,
- E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
+ if (isa<ScalableVectorType>(ConstMask->getType()))
+ return false;
+ for (unsigned
+ I = 0,
+ E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
I != E; ++I) {
if (auto *MaskElt = ConstMask->getAggregateElement(I))
if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
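These two predicates are the fast paths for masked memory operations: an all-zero-or-undef mask makes the operation a no-op, while an all-ones-or-undef mask degrades it to an ordinary load or store. A minimal classification sketch with a hypothetical helper:

    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Value.h"

    // Sketch: 0 = the masked store is dead, 1 = lower to a plain store,
    // 2 = the mask is unknown or mixed, keep the masked form.
    static int classifyStoreMask(llvm::Value *Mask) {
      if (llvm::maskIsAllZeroOrUndef(Mask))
        return 0;
      if (llvm::maskIsAllOneOrUndef(Mask))
        return 1;
      return 2;
    }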
@@ -921,14 +921,14 @@ bool llvm::maskIsAllOneOrUndef(Value *Mask) {
/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
- assert(isa<FixedVectorType>(Mask->getType()) &&
- isa<IntegerType>(Mask->getType()->getScalarType()) &&
- cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
- 1 &&
- "Mask must be a fixed width vector of i1");
-
- const unsigned VWidth =
- cast<FixedVectorType>(Mask->getType())->getNumElements();
+ assert(isa<FixedVectorType>(Mask->getType()) &&
+ isa<IntegerType>(Mask->getType()->getScalarType()) &&
+ cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+ 1 &&
+ "Mask must be a fixed width vector of i1");
+
+ const unsigned VWidth =
+ cast<FixedVectorType>(Mask->getType())->getNumElements();
APInt DemandedElts = APInt::getAllOnesValue(VWidth);
if (auto *CV = dyn_cast<ConstantVector>(Mask))
for (unsigned i = 0; i < VWidth; i++)
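The page cuts the last hunk off here; the remainder of possiblyDemandedEltsInMask simply clears the bit of every mask lane that is known to be zero. A hedged usage sketch with a constant mask built the way the assert above requires (a fixed-width vector of i1):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/VectorUtils.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/LLVMContext.h"

    // Sketch: for the constant mask <i1 1, i1 0, i1 1, i1 0> only lanes 0
    // and 2 can be accessed, so exactly bits 0 and 2 of the result are set.
    llvm::APInt demandedLanes(llvm::LLVMContext &Ctx) {
      llvm::Constant *T = llvm::ConstantInt::getTrue(Ctx);
      llvm::Constant *F = llvm::ConstantInt::getFalse(Ctx);
      return llvm::possiblyDemandedEltsInMask(
          llvm::ConstantVector::get({T, F, T, F})); // == 0b0101
    }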