author     shadchin <shadchin@yandex-team.ru>            2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp | 132
1 file changed, 66 insertions(+), 66 deletions(-)
diff --git a/contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp
index 86faf511c9..630490f6f9 100644
--- a/contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/libs/llvm12/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -338,32 +338,32 @@ LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
}
#endif
-// Align blocks where the previous block does not fall through. This may add
-// extra NOPs, but they will not be executed. It uses the PrefLoopAlignment as a
-// measure of how much to align, and only runs at CodeGenOpt::Aggressive.
-static bool AlignBlocks(MachineFunction *MF) {
- if (MF->getTarget().getOptLevel() != CodeGenOpt::Aggressive ||
- MF->getFunction().hasOptSize())
- return false;
-
- auto *TLI = MF->getSubtarget().getTargetLowering();
- const Align Alignment = TLI->getPrefLoopAlignment();
- if (Alignment < 4)
- return false;
-
- bool Changed = false;
- bool PrevCanFallthough = true;
- for (auto &MBB : *MF) {
- if (!PrevCanFallthough) {
- Changed = true;
- MBB.setAlignment(Alignment);
- }
- PrevCanFallthough = MBB.canFallThrough();
- }
-
- return Changed;
-}
-
+// Align blocks where the previous block does not fall through. This may add
+// extra NOPs, but they will not be executed. It uses the PrefLoopAlignment as a
+// measure of how much to align, and only runs at CodeGenOpt::Aggressive.
+static bool AlignBlocks(MachineFunction *MF) {
+ if (MF->getTarget().getOptLevel() != CodeGenOpt::Aggressive ||
+ MF->getFunction().hasOptSize())
+ return false;
+
+ auto *TLI = MF->getSubtarget().getTargetLowering();
+ const Align Alignment = TLI->getPrefLoopAlignment();
+ if (Alignment < 4)
+ return false;
+
+ bool Changed = false;
+ bool PrevCanFallthough = true;
+ for (auto &MBB : *MF) {
+ if (!PrevCanFallthough) {
+ Changed = true;
+ MBB.setAlignment(Alignment);
+ }
+ PrevCanFallthough = MBB.canFallThrough();
+ }
+
+ return Changed;
+}
+
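For readers skimming the hunk above: AlignBlocks pads only blocks whose
predecessor cannot fall through, so the inserted NOPs sit on a dead path and
cost nothing at run time. A minimal standalone sketch of that idea follows;
the Block type and alignBlocks name are illustrative stand-ins, not LLVM API.

#include <cstdint>
#include <iostream>
#include <vector>

struct Block {
  bool CanFallThrough; // does control flow fall into the next block?
  uint32_t AlignLog2;  // log2 of the block's alignment in bytes
};

// Raise the alignment of every block whose predecessor does not fall
// through; padding emitted before such a block is never executed.
static bool alignBlocks(std::vector<Block> &Blocks, uint32_t PrefAlignLog2) {
  bool Changed = false;
  bool PrevCanFallThrough = true; // the function entry acts as a fallthrough
  for (Block &B : Blocks) {
    if (!PrevCanFallThrough && B.AlignLog2 < PrefAlignLog2) {
      B.AlignLog2 = PrefAlignLog2;
      Changed = true;
    }
    PrevCanFallThrough = B.CanFallThrough;
  }
  return Changed;
}

int main() {
  std::vector<Block> Blocks = {{true, 0}, {false, 0}, {true, 0}, {true, 0}};
  alignBlocks(Blocks, /*PrefAlignLog2=*/2); // prefer 4-byte alignment
  for (const Block &B : Blocks)
    std::cout << "align=" << (1u << B.AlignLog2) << '\n';
  // Prints 1, 1, 4, 1: only the block after the non-fallthrough one is padded.
}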
bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MCP = mf.getConstantPool();
@@ -385,10 +385,10 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
isThumb2 = AFI->isThumb2Function();
bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
- // TBB generation code in this constant island pass has not been adapted to
- // deal with speculation barriers.
- if (STI->hardenSlsRetBr())
- GenerateTBB = false;
+ // TBB generation code in this constant island pass has not been adapted to
+ // deal with speculation barriers.
+ if (STI->hardenSlsRetBr())
+ GenerateTBB = false;
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
@@ -406,9 +406,9 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MF->RenumberBlocks();
}
- // Align any non-fallthrough blocks
- MadeChange |= AlignBlocks(MF);
-
+ // Align any non-fallthrough blocks
+ MadeChange |= AlignBlocks(MF);
+
// Perform the initial placement of the constant pool entries. To start with,
// we put them all at the end of the function.
std::vector<MachineInstr*> CPEMIs;
@@ -524,11 +524,11 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
- // Special case: halfword literals still need word alignment on the function.
- Align FuncAlign = MaxAlign;
- if (MaxAlign == 2)
- FuncAlign = Align(4);
- MF->ensureAlignment(FuncAlign);
+ // Special case: halfword literals still need word alignment on the function.
+ Align FuncAlign = MaxAlign;
+ if (MaxAlign == 2)
+ FuncAlign = Align(4);
+ MF->ensureAlignment(FuncAlign);
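The FuncAlign hunk above encodes one special case: a constant pool holding
only halfword (2-byte) literals still forces word alignment on the function,
since the linker may relocate the whole function. A small sketch of that
rounding rule, with plain integers standing in for llvm::Align (illustrative,
not LLVM API):

#include <cassert>
#include <cstdint>
#include <iostream>

static uint32_t functionAlignment(uint32_t MaxCPEAlign) {
  assert(MaxCPEAlign != 0 && (MaxCPEAlign & (MaxCPEAlign - 1)) == 0 &&
         "alignment must be a power of two");
  // Halfword literals: round the 2-byte requirement up to a full word.
  return MaxCPEAlign == 2 ? 4 : MaxCPEAlign;
}

int main() {
  std::cout << functionAlignment(1) << ' '   // 1
            << functionAlignment(2) << ' '   // 4 (the special case)
            << functionAlignment(8) << '\n'; // 8
}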
// Order the entries in BB by descending alignment. That ensures correct
// alignment of all entries as long as BB is sufficiently aligned. Keep
@@ -543,7 +543,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
const DataLayout &TD = MF->getDataLayout();
for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
- unsigned Size = CPs[i].getSizeInBytes(TD);
+ unsigned Size = CPs[i].getSizeInBytes(TD);
Align Alignment = CPs[i].getAlign();
// Verify that all constant pool entries are a multiple of their alignment.
// If not, we would have to pad them out so that instructions stay aligned.
@@ -586,12 +586,12 @@ void ARMConstantIslands::doInitialJumpTablePlacement(
MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
for (MachineBasicBlock &MBB : *MF) {
auto MI = MBB.getLastNonDebugInstr();
- // Look past potential SpeculationBarriers at end of BB.
- while (MI != MBB.end() &&
- (isSpeculationBarrierEndBBOpcode(MI->getOpcode()) ||
- MI->isDebugInstr()))
- --MI;
-
+ // Look past potential SpeculationBarriers at end of BB.
+ while (MI != MBB.end() &&
+ (isSpeculationBarrierEndBBOpcode(MI->getOpcode()) ||
+ MI->isDebugInstr()))
+ --MI;
+
if (MI == MBB.end())
continue;
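The loop added in the hunk above walks backwards from the end of the block,
stepping over speculation barriers and debug instructions to reach the real
terminator. A toy version of that backwards scan, with a hypothetical Op enum
in place of LLVM opcodes:

#include <iostream>
#include <vector>

enum class Op { JumpTable, DebugValue, SpeculationBarrier };

static bool isIgnorable(Op O) {
  return O == Op::DebugValue || O == Op::SpeculationBarrier;
}

// Index of the last "real" instruction, or -1 if the block contains only
// barriers and debug instructions (the analogue of reaching MBB.end()).
static int lastRealInstr(const std::vector<Op> &Block) {
  int I = static_cast<int>(Block.size()) - 1;
  while (I >= 0 && isIgnorable(Block[I]))
    --I;
  return I;
}

int main() {
  std::vector<Op> MBB = {Op::JumpTable, Op::SpeculationBarrier,
                         Op::DebugValue};
  std::cout << lastRealInstr(MBB) << '\n'; // 0: the jump-table branch
}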
@@ -814,26 +814,26 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
// Taking the address of a CP entry.
case ARM::LEApcrel:
- case ARM::LEApcrelJT: {
- // This takes a SoImm, which is an 8-bit rotated immediate. We'll
- // pretend the maximum offset is 255 * 4. Since each instruction is
- // 4 bytes wide, this is always correct. We'll check for other
- // displacements that fit in a SoImm as well.
- Bits = 8;
- NegOk = true;
- IsSoImm = true;
- unsigned CPI = I.getOperand(op).getIndex();
- assert(CPI < CPEMIs.size());
- MachineInstr *CPEMI = CPEMIs[CPI];
- const Align CPEAlign = getCPEAlign(CPEMI);
- const unsigned LogCPEAlign = Log2(CPEAlign);
- if (LogCPEAlign >= 2)
- Scale = 4;
- else
- // For constants with less than 4-byte alignment,
- // we'll pretend the maximum offset is 255 * 1.
- Scale = 1;
- }
+ case ARM::LEApcrelJT: {
+ // This takes a SoImm, which is an 8-bit rotated immediate. We'll
+ // pretend the maximum offset is 255 * 4. Since each instruction is
+ // 4 bytes wide, this is always correct. We'll check for other
+ // displacements that fit in a SoImm as well.
+ Bits = 8;
+ NegOk = true;
+ IsSoImm = true;
+ unsigned CPI = I.getOperand(op).getIndex();
+ assert(CPI < CPEMIs.size());
+ MachineInstr *CPEMI = CPEMIs[CPI];
+ const Align CPEAlign = getCPEAlign(CPEMI);
+ const unsigned LogCPEAlign = Log2(CPEAlign);
+ if (LogCPEAlign >= 2)
+ Scale = 4;
+ else
+ // For constants with less than 4-byte alignment,
+ // we'll pretend the maximum offset is 255 * 1.
+ Scale = 1;
+ }
break;
case ARM::t2LEApcrel:
case ARM::t2LEApcrelJT:
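To make the arithmetic in the LEApcrelJT hunk concrete: an 8-bit SoImm gives
255 addressable units, and the unit (Scale) is 4 bytes when the constant pool
entry is at least word-aligned, 1 byte otherwise. A sketch of that range
computation (names are illustrative, not LLVM API):

#include <cstdint>
#include <iostream>

static uint32_t maxLEApcrelOffset(uint32_t CPEAlignLog2) {
  const uint32_t Bits = 8;                      // SoImm payload width
  const uint32_t Scale = CPEAlignLog2 >= 2 ? 4  // word-aligned entry
                                           : 1; // sub-word alignment
  return ((1u << Bits) - 1) * Scale;            // 255 * Scale
}

int main() {
  std::cout << maxLEApcrelOffset(2) << '\n'; // 1020: 255 * 4 bytes of reach
  std::cout << maxLEApcrelOffset(0) << '\n'; // 255:  255 * 1 byte of reach
}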
@@ -2124,7 +2124,7 @@ static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
MachineFunction *MF = MBB->getParent();
++MBB;
- return MBB != MF->end() && !MBB->empty() && &*MBB->begin() == CPEMI;
+ return MBB != MF->end() && !MBB->empty() && &*MBB->begin() == CPEMI;
}
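The return statement touched in the hunk above asks whether the jump table is
the very first instruction of the lexically next basic block. The same
pattern in miniature, with toy containers in place of LLVM's iterators
(illustrative only):

#include <iostream>
#include <vector>

struct Instr { int Id; };
using Block = std::vector<Instr>;

// True if Target is the first instruction of the block after BBIdx,
// mirroring the ++MBB / begin() comparison in jumpTableFollowsTB.
static bool firstInstrOfNextBlock(const std::vector<Block> &Fn, size_t BBIdx,
                                  const Instr *Target) {
  size_t Next = BBIdx + 1;
  return Next != Fn.size() && !Fn[Next].empty() && &Fn[Next].front() == Target;
}

int main() {
  std::vector<Block> Fn(2);
  Fn[0].push_back({0}); // the TBB/TBH branch
  Fn[1].push_back({1}); // the jump-table data block's first entry
  std::cout << firstInstrOfNextBlock(Fn, 0, &Fn[1].front()) << '\n'; // 1
}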
static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,