author     shadchin <shadchin@yandex-team.ru>            2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>   2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine')
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp | 182
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp      |  10
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/ya.make                   |  12
3 files changed, 102 insertions, 102 deletions
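For orientation only (the commit itself only restores authorship annotations and does not change behavior): the foldGuardedFunnelShift code touched in the diff below recognizes a rotate that is guarded by a zero check on the shift amount. A minimal C++ sketch, with hypothetical names, of the source-level idiom the pass folds into a single llvm.fshl intrinsic:

```cpp
#include <cstdint>

// Hypothetical example: a rotate-left that branches around the shift when
// the amount is zero, because (x >> (32 - 0)) would be undefined behavior.
// AggressiveInstCombine matches the resulting cmp/branch/phi pattern and
// replaces it with the llvm.fshl.i32 intrinsic (a true rotate on most targets).
uint32_t guarded_rotl(uint32_t x, uint32_t shamt) {
    if (shamt == 0)          // GuardBB: icmp eq + branch straight to the phi block
        return x;
    // FunnelBB: shl/lshr/or expansion of the rotate
    return (x << shamt) | (x >> (32 - shamt));
}
```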
diff --git a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index c088631352..a7ae10d156 100644
--- a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -21,10 +21,10 @@
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Function.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
@@ -40,8 +40,8 @@ using namespace PatternMatch;
STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
"Number of guarded rotates transformed into funnel shifts");
-STATISTIC(NumGuardedFunnelShifts,
- "Number of guarded funnel shifts transformed into funnel shifts");
+STATISTIC(NumGuardedFunnelShifts,
+ "Number of guarded funnel shifts transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");
namespace {
@@ -70,127 +70,127 @@ public:
};
} // namespace
-/// Match a pattern for a bitwise funnel/rotate operation that partially guards
-/// against undefined behavior by branching around the funnel-shift/rotation
-/// when the shift amount is 0.
-static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
+/// Match a pattern for a bitwise funnel/rotate operation that partially guards
+/// against undefined behavior by branching around the funnel-shift/rotation
+/// when the shift amount is 0.
+static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
return false;
// As with the one-use checks below, this is not strictly necessary, but we
// are being cautious to avoid potential perf regressions on targets that
- // do not actually have a funnel/rotate instruction (where the funnel shift
- // would be expanded back into math/shift/logic ops).
+ // do not actually have a funnel/rotate instruction (where the funnel shift
+ // would be expanded back into math/shift/logic ops).
if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
return false;
- // Match V to funnel shift left/right and capture the source operands and
- // shift amount.
- auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
- Value *&ShAmt) {
- Value *SubAmt;
+ // Match V to funnel shift left/right and capture the source operands and
+ // shift amount.
+ auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
+ Value *&ShAmt) {
+ Value *SubAmt;
unsigned Width = V->getType()->getScalarSizeInBits();
- // fshl(ShVal0, ShVal1, ShAmt)
- // == (ShVal0 << ShAmt) | (ShVal1 >> (Width -ShAmt))
- if (match(V, m_OneUse(m_c_Or(
- m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
- m_LShr(m_Value(ShVal1),
- m_Sub(m_SpecificInt(Width), m_Value(SubAmt))))))) {
- if (ShAmt == SubAmt) // TODO: Use m_Specific
- return Intrinsic::fshl;
+ // fshl(ShVal0, ShVal1, ShAmt)
+ // == (ShVal0 << ShAmt) | (ShVal1 >> (Width -ShAmt))
+ if (match(V, m_OneUse(m_c_Or(
+ m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
+ m_LShr(m_Value(ShVal1),
+ m_Sub(m_SpecificInt(Width), m_Value(SubAmt))))))) {
+ if (ShAmt == SubAmt) // TODO: Use m_Specific
+ return Intrinsic::fshl;
}
- // fshr(ShVal0, ShVal1, ShAmt)
- // == (ShVal0 >> ShAmt) | (ShVal1 << (Width - ShAmt))
- if (match(V,
- m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
- m_Value(SubAmt))),
- m_LShr(m_Value(ShVal1), m_Value(ShAmt)))))) {
- if (ShAmt == SubAmt) // TODO: Use m_Specific
- return Intrinsic::fshr;
+ // fshr(ShVal0, ShVal1, ShAmt)
+ // == (ShVal0 >> ShAmt) | (ShVal1 << (Width - ShAmt))
+ if (match(V,
+ m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
+ m_Value(SubAmt))),
+ m_LShr(m_Value(ShVal1), m_Value(ShAmt)))))) {
+ if (ShAmt == SubAmt) // TODO: Use m_Specific
+ return Intrinsic::fshr;
}
return Intrinsic::not_intrinsic;
};
- // One phi operand must be a funnel/rotate operation, and the other phi
- // operand must be the source value of that funnel/rotate operation:
- // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
- // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
- // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
+ // One phi operand must be a funnel/rotate operation, and the other phi
+ // operand must be the source value of that funnel/rotate operation:
+ // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
+ // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
+ // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
PHINode &Phi = cast<PHINode>(I);
- unsigned FunnelOp = 0, GuardOp = 1;
+ unsigned FunnelOp = 0, GuardOp = 1;
Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
- Value *ShVal0, *ShVal1, *ShAmt;
- Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
- if (IID == Intrinsic::not_intrinsic ||
- (IID == Intrinsic::fshl && ShVal0 != P1) ||
- (IID == Intrinsic::fshr && ShVal1 != P1)) {
- IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
- if (IID == Intrinsic::not_intrinsic ||
- (IID == Intrinsic::fshl && ShVal0 != P0) ||
- (IID == Intrinsic::fshr && ShVal1 != P0))
+ Value *ShVal0, *ShVal1, *ShAmt;
+ Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
+ if (IID == Intrinsic::not_intrinsic ||
+ (IID == Intrinsic::fshl && ShVal0 != P1) ||
+ (IID == Intrinsic::fshr && ShVal1 != P1)) {
+ IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
+ if (IID == Intrinsic::not_intrinsic ||
+ (IID == Intrinsic::fshl && ShVal0 != P0) ||
+ (IID == Intrinsic::fshr && ShVal1 != P0))
return false;
assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
"Pattern must match funnel shift left or right");
- std::swap(FunnelOp, GuardOp);
+ std::swap(FunnelOp, GuardOp);
}
// The incoming block with our source operand must be the "guard" block.
- // That must contain a cmp+branch to avoid the funnel/rotate when the shift
- // amount is equal to 0. The other incoming block is the block with the
- // funnel/rotate.
- BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
- BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
+ // That must contain a cmp+branch to avoid the funnel/rotate when the shift
+ // amount is equal to 0. The other incoming block is the block with the
+ // funnel/rotate.
+ BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
+ BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
Instruction *TermI = GuardBB->getTerminator();
-
- // Ensure that the shift values dominate each block.
- if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
- return false;
-
+
+ // Ensure that the shift values dominate each block.
+ if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
+ return false;
+
ICmpInst::Predicate Pred;
BasicBlock *PhiBB = Phi.getParent();
- if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()),
- m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
+ if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()),
+ m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
return false;
if (Pred != CmpInst::ICMP_EQ)
return false;
- IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
-
- if (ShVal0 == ShVal1)
- ++NumGuardedRotates;
- else
- ++NumGuardedFunnelShifts;
-
- // If this is not a rotate then the select was blocking poison from the
- // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
- bool IsFshl = IID == Intrinsic::fshl;
- if (ShVal0 != ShVal1) {
- if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
- ShVal1 = Builder.CreateFreeze(ShVal1);
- else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
- ShVal0 = Builder.CreateFreeze(ShVal0);
- }
-
+ IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
+
+ if (ShVal0 == ShVal1)
+ ++NumGuardedRotates;
+ else
+ ++NumGuardedFunnelShifts;
+
+ // If this is not a rotate then the select was blocking poison from the
+ // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
+ bool IsFshl = IID == Intrinsic::fshl;
+ if (ShVal0 != ShVal1) {
+ if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
+ ShVal1 = Builder.CreateFreeze(ShVal1);
+ else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
+ ShVal0 = Builder.CreateFreeze(ShVal0);
+ }
+
// We matched a variation of this IR pattern:
// GuardBB:
- // %cmp = icmp eq i32 %ShAmt, 0
- // br i1 %cmp, label %PhiBB, label %FunnelBB
- // FunnelBB:
- // %sub = sub i32 32, %ShAmt
- // %shr = lshr i32 %ShVal1, %sub
- // %shl = shl i32 %ShVal0, %ShAmt
- // %fsh = or i32 %shr, %shl
+ // %cmp = icmp eq i32 %ShAmt, 0
+ // br i1 %cmp, label %PhiBB, label %FunnelBB
+ // FunnelBB:
+ // %sub = sub i32 32, %ShAmt
+ // %shr = lshr i32 %ShVal1, %sub
+ // %shl = shl i32 %ShVal0, %ShAmt
+ // %fsh = or i32 %shr, %shl
// br label %PhiBB
// PhiBB:
- // %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
+ // %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
// -->
- // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
+ // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
- Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
+ Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
return true;
}
@@ -237,8 +237,8 @@ static bool matchAndOrChain(Value *V, MaskOps &MOps) {
// We need a shift-right or a bare value representing a compare of bit 0 of
// the original source operand.
Value *Candidate;
- const APInt *BitIndex = nullptr;
- if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
+ const APInt *BitIndex = nullptr;
+ if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
Candidate = V;
// Initialize result source operand.
@@ -246,11 +246,11 @@ static bool matchAndOrChain(Value *V, MaskOps &MOps) {
MOps.Root = Candidate;
// The shift constant is out-of-range? This code hasn't been simplified.
- if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
+ if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
return false;
// Fill in the mask bit derived from the shift constant.
- MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
+ MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
return MOps.Root == Candidate;
}
@@ -379,7 +379,7 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
// iteratively in this loop rather than waiting until the end.
for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
MadeChange |= foldAnyOrAllBitsSet(I);
- MadeChange |= foldGuardedFunnelShift(I, DT);
+ MadeChange |= foldGuardedFunnelShift(I, DT);
MadeChange |= tryToRecognizePopCount(I);
}
}
diff --git a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
index 858907eed4..16b82219e8 100644
--- a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
+++ b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
@@ -31,7 +31,7 @@
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
@@ -130,7 +130,7 @@ bool TruncInstCombine::buildTruncExpressionDag() {
case Instruction::Select: {
SmallVector<Value *, 2> Operands;
getRelevantOperands(I, Operands);
- append_range(Worklist, Operands);
+ append_range(Worklist, Operands);
break;
}
default:
@@ -288,8 +288,8 @@ Type *TruncInstCombine::getBestTruncatedType() {
/// version of \p Ty, otherwise return \p Ty.
static Type *getReducedType(Value *V, Type *Ty) {
assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
- if (auto *VTy = dyn_cast<VectorType>(V->getType()))
- return VectorType::get(Ty, VTy->getElementCount());
+ if (auto *VTy = dyn_cast<VectorType>(V->getType()))
+ return VectorType::get(Ty, VTy->getElementCount());
return Ty;
}
@@ -341,7 +341,7 @@ void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
// 1. Update Old-TruncInst -> New-TruncInst.
// 2. Remove Old-TruncInst (if New node is not TruncInst).
// 3. Add New-TruncInst (if Old node was not TruncInst).
- auto *Entry = find(Worklist, I);
+ auto *Entry = find(Worklist, I);
if (Entry != Worklist.end()) {
if (auto *NewCI = dyn_cast<TruncInst>(Res))
*Entry = NewCI;
diff --git a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/ya.make b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/ya.make
index 2885ad1ddf..c472a2054a 100644
--- a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/ya.make
+++ b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/ya.make
@@ -12,12 +12,12 @@ LICENSE(Apache-2.0 WITH LLVM-exception)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
PEERDIR(
- contrib/libs/llvm12
- contrib/libs/llvm12/include
- contrib/libs/llvm12/lib/Analysis
- contrib/libs/llvm12/lib/IR
- contrib/libs/llvm12/lib/Support
- contrib/libs/llvm12/lib/Transforms/Utils
+ contrib/libs/llvm12
+ contrib/libs/llvm12/include
+ contrib/libs/llvm12/lib/Analysis
+ contrib/libs/llvm12/lib/IR
+ contrib/libs/llvm12/lib/Support
+ contrib/libs/llvm12/lib/Transforms/Utils
)
ADDINCL(