author     shadchin <shadchin@yandex-team.ru>            2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/CodeGen/Analysis.cpp
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/Analysis.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/Analysis.cpp  54
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/contrib/libs/llvm12/lib/CodeGen/Analysis.cpp b/contrib/libs/llvm12/lib/CodeGen/Analysis.cpp
index 08f8fa04fb..ebeff1fec3 100644
--- a/contrib/libs/llvm12/lib/CodeGen/Analysis.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/Analysis.cpp
@@ -88,25 +88,25 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    // If the Offsets aren't needed, don't query the struct layout. This allows
-    // us to support structs with scalable vectors for operations that don't
-    // need offsets.
-    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
+    // If the Offsets aren't needed, don't query the struct layout. This allows
+    // us to support structs with scalable vectors for operations that don't
+    // need offsets.
+    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
-         EI != EE; ++EI) {
-      // Don't compute the element offset if we didn't get a StructLayout above.
-      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
+         EI != EE; ++EI) {
+      // Don't compute the element offset if we didn't get a StructLayout above.
+      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
       ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
-                      StartingOffset + EltOffset);
-    }
+                      StartingOffset + EltOffset);
+    }
     return;
   }
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
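
Note on the hunk above (whitespace-only; old and new lines are textually identical): ComputeValueVTs flattens an aggregate IR type into its leaf value types, tracking a byte offset for each leaf, and skips the StructLayout query entirely when the caller passes no Offsets vector. A minimal standalone sketch of that recursion, using toy types rather than the LLVM API (ToyType, sizeOf and flatten are invented names, and the packed layout ignores the padding DataLayout would add):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for llvm::Type: a leaf scalar or a struct-like aggregate.
struct ToyType {
  std::string Name;              // set for leaves, e.g. "i32"
  uint64_t Size = 0;             // leaf alloc size in bytes
  std::vector<ToyType> Members;  // non-empty for aggregates
};

uint64_t sizeOf(const ToyType &Ty) {
  if (Ty.Members.empty())
    return Ty.Size;
  uint64_t S = 0;
  for (const ToyType &M : Ty.Members)
    S += sizeOf(M);  // packed layout: no padding modeled
  return S;
}

// Pre-order recursion mirroring ComputeValueVTs: collect leaf types and,
// only if the caller asked for them, their byte offsets.
void flatten(const ToyType &Ty, uint64_t StartingOffset,
             std::vector<std::string> &Leaves,
             std::vector<uint64_t> *Offsets) {
  if (!Ty.Members.empty()) {
    uint64_t EltOffset = 0;
    for (const ToyType &Elt : Ty.Members) {
      flatten(Elt, StartingOffset + EltOffset, Leaves, Offsets);
      EltOffset += sizeOf(Elt);
    }
    return;
  }
  Leaves.push_back(Ty.Name);
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

int main() {
  // { i32, { i8, i64 } } -> leaves i32@0, i8@4, i64@5 (packed, no padding).
  ToyType Inner{"", 0, {{"i8", 1, {}}, {"i64", 8, {}}}};
  ToyType Outer{"", 0, {{"i32", 4, {}}, Inner}};
  std::vector<std::string> Leaves;
  std::vector<uint64_t> Offsets;
  flatten(Outer, 0, Leaves, &Offsets);
  for (size_t i = 0; i < Leaves.size(); ++i)
    std::cout << Leaves[i] << " @ " << Offsets[i] << "\n";
}
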
@@ -137,21 +137,21 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                             uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(&Ty)) {
-    // If the Offsets aren't needed, don't query the struct layout. This allows
-    // us to support structs with scalable vectors for operations that don't
-    // need offsets.
-    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
-    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
-      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
+    // If the Offsets aren't needed, don't query the struct layout. This allows
+    // us to support structs with scalable vectors for operations that don't
+    // need offsets.
+    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
+    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
+      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
       computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
-                       StartingOffset + EltOffset);
-    }
+                       StartingOffset + EltOffset);
+    }
     return;
   }
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);
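
computeValueLLTs is the GlobalISel counterpart of the hunk above: it produces low-level types (LLTs) instead of EVTs but walks structs and arrays the same way, and it only needs a DataLayout, not a TargetLowering. A hedged sketch of calling it directly, assuming LLVM 12 headers and libraries are available to build against (the "e" layout string is just a minimal little-endian DataLayout chosen for the example):

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("e");  // minimal little-endian layout for the sketch

  // [4 x i32]: the array arm of the hunk above expands this into four
  // leaves at byte offsets 0, 4, 8, 12 (EltSize = 4 for i32).
  Type *I32 = Type::getInt32Ty(Ctx);
  ArrayType *Arr = ArrayType::get(I32, 4);

  SmallVector<LLT, 4> ValueTys;
  SmallVector<uint64_t, 4> Offsets;
  computeValueLLTs(DL, *Arr, ValueTys, &Offsets);

  for (unsigned i = 0, e = ValueTys.size(); i != e; ++i)
    outs() << "leaf " << i << " at byte offset " << Offsets[i] << "\n";
}
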
@@ -527,15 +527,15 @@ bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
     // Debug info intrinsics do not get in the way of tail call optimization.
     if (isa<DbgInfoIntrinsic>(BBI))
       continue;
-    // Pseudo probe intrinsics do not block tail call optimization either.
-    if (isa<PseudoProbeInst>(BBI))
-      continue;
-    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
-    // call optimization.
+    // Pseudo probe intrinsics do not block tail call optimization either.
+    if (isa<PseudoProbeInst>(BBI))
+      continue;
+    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
+    // call optimization.
     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
       if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
-          II->getIntrinsicID() == Intrinsic::assume ||
-          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
+          II->getIntrinsicID() == Intrinsic::assume ||
+          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
         continue;
     if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
         !isSafeToSpeculativelyExecute(&*BBI))
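
The hunk above touches an allow-list: when deciding whether a call sits in tail call position, the scan from the call site to the return may step over debug info intrinsics, pseudo probes, lifetime.end, assume and noalias.scope.decl, but must give up on anything that might actually execute. A toy mirror of that loop (ToyInst and mayBeTailCall are invented names; the real check also consults mayReadFromMemory and isSafeToSpeculativelyExecute, collapsed here into one flag):

#include <string>
#include <vector>

// Toy stand-in for the instructions between a call and the ret.
struct ToyInst {
  std::string Name;          // intrinsic name, or plain opcode text
  bool SideEffects = false;  // stands in for mayHaveSideEffects() etc.
};

// Mirrors the scan in isInTailCallPosition: benign intrinsics are
// skipped; anything else that could run disqualifies the tail call.
bool mayBeTailCall(const std::vector<ToyInst> &AfterCall) {
  for (const ToyInst &I : AfterCall) {
    if (I.Name == "llvm.dbg.value" ||
        I.Name == "llvm.pseudoprobe" ||
        I.Name == "llvm.lifetime.end" ||
        I.Name == "llvm.assume" ||
        I.Name == "llvm.experimental.noalias.scope.decl")
      continue;  // the allow-list from the hunk above
    if (I.SideEffects)
      return false;  // real work between call and ret blocks TCO
  }
  return true;
}

int main() {
  // A lifetime.end between the call and the ret is fine ...
  std::vector<ToyInst> Ok = {{"llvm.lifetime.end", false}};
  // ... but a store is not.
  std::vector<ToyInst> Bad = {{"store", true}};
  return mayBeTailCall(Ok) && !mayBeTailCall(Bad) ? 0 : 1;
}
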
@@ -733,7 +733,7 @@ static void collectEHScopeMembers(
     if (Visiting->isEHScopeReturnBlock())
       continue;
 
-    append_range(Worklist, Visiting->successors());
+    append_range(Worklist, Visiting->successors());
   }
 }
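
For reference, collectEHScopeMembers is a plain worklist walk: pop a block, record its scope, skip blocks already visited, stop at EH-scope-return blocks, and otherwise push every successor (the append_range call this hunk touches). A toy version under invented names (ToyBlock, collectScopeMembers), with std::vector::insert playing the role of append_range:

#include <cassert>
#include <map>
#include <vector>

// Toy stand-in for MachineBasicBlock.
struct ToyBlock {
  bool IsScopeReturn = false;           // like isEHScopeReturnBlock()
  std::vector<const ToyBlock *> Succs;  // successor blocks
};

// Worklist walk mirroring collectEHScopeMembers: record each reachable
// block's scope, don't revisit, and don't follow scope-return blocks.
void collectScopeMembers(const ToyBlock *Entry, int Scope,
                         std::map<const ToyBlock *, int> &Membership) {
  std::vector<const ToyBlock *> Worklist = {Entry};
  while (!Worklist.empty()) {
    const ToyBlock *Visiting = Worklist.back();
    Worklist.pop_back();
    if (!Membership.insert({Visiting, Scope}).second)
      continue;  // already a member; the real code asserts the scopes match
    if (Visiting->IsScopeReturn)
      continue;  // scope transfer boundary: don't follow successors
    // Equivalent of append_range(Worklist, Visiting->successors()):
    Worklist.insert(Worklist.end(), Visiting->Succs.begin(),
                    Visiting->Succs.end());
  }
}

int main() {
  ToyBlock C{true, {}};  // scope-return block: its successors are cut off
  ToyBlock B{false, {&C}};
  ToyBlock A{false, {&B, &C}};
  std::map<const ToyBlock *, int> Membership;
  collectScopeMembers(&A, /*Scope=*/0, Membership);
  assert(Membership.size() == 3);  // A, B and C all land in scope 0
}
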