| field | value | date |
|---|---|---|
| author | orivej <orivej@yandex-team.ru> | 2022-02-10 16:45:01 +0300 |
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:01 +0300 |
| commit | 2d37894b1b037cf24231090eda8589bbb44fb6fc (patch) | |
| tree | be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/libs/llvm12/lib/CodeGen/GlobalISel | |
| parent | 718c552901d703c502ccbefdfc3c9028d608b947 (diff) | |
| download | ydb-2d37894b1b037cf24231090eda8589bbb44fb6fc.tar.gz | |
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/GlobalISel')
25 files changed, 17115 insertions, 17115 deletions
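Since this commit only restores authorship annotations, the diff below should be content-neutral: every removed line reappears verbatim as an added line, which is consistent with the perfectly symmetric diffstat (17115 insertions, 17115 deletions). As a minimal sketch of how one might verify that property on a unified diff saved to a local file — this checker is hypothetical, not part of ydb or LLVM, and all identifiers in it are invented for illustration:

```cpp
// content_neutral_check.cpp (hypothetical): verify that a unified diff is
// content-neutral, i.e. every '-' line reappears verbatim as a '+' line.
#include <fstream>
#include <iostream>
#include <map>
#include <string>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <diff-file>\n";
    return 2;
  }
  std::ifstream In(argv[1]);
  if (!In) {
    std::cerr << "cannot open " << argv[1] << "\n";
    return 2;
  }
  // For each line of content, count removals as -1 and additions as +1.
  // A content-neutral diff leaves every counter at zero.
  std::map<std::string, long> Balance;
  std::string Line;
  while (std::getline(In, Line)) {
    // Skip file headers ("--- a/...", "+++ b/...") and hunk markers ("@@").
    if (Line.rfind("--- ", 0) == 0 || Line.rfind("+++ ", 0) == 0 ||
        Line.rfind("@@", 0) == 0)
      continue;
    if (!Line.empty() && Line[0] == '-')
      --Balance[Line.substr(1)];
    else if (!Line.empty() && Line[0] == '+')
      ++Balance[Line.substr(1)];
  }
  for (const auto &KV : Balance) {
    if (KV.second != 0) {
      std::cout << "content change detected: " << KV.first << "\n";
      return 1;
    }
  }
  std::cout << "diff is content-neutral\n";
  return 0;
}
```

Run against this commit's diff (e.g. `./content_neutral_check 2d37894b.diff`), a zero exit status would confirm the change touches only authorship metadata, not code.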
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEInfo.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEInfo.cpp index 513405a7e2..2fa208fbfa 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEInfo.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEInfo.cpp @@ -1,372 +1,372 @@ -//===- CSEInfo.cpp ------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// -//===----------------------------------------------------------------------===// -#include "llvm/CodeGen/GlobalISel/CSEInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/InitializePasses.h" - -#define DEBUG_TYPE "cseinfo" - -using namespace llvm; -char llvm::GISelCSEAnalysisWrapperPass::ID = 0; -GISelCSEAnalysisWrapperPass::GISelCSEAnalysisWrapperPass() - : MachineFunctionPass(ID) { - initializeGISelCSEAnalysisWrapperPassPass(*PassRegistry::getPassRegistry()); -} -INITIALIZE_PASS_BEGIN(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, - "Analysis containing CSE Info", false, true) -INITIALIZE_PASS_END(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, - "Analysis containing CSE Info", false, true) - -/// -------- UniqueMachineInstr -------------// - -void UniqueMachineInstr::Profile(FoldingSetNodeID &ID) { - GISelInstProfileBuilder(ID, MI->getMF()->getRegInfo()).addNodeID(MI); -} -/// ----------------------------------------- - -/// --------- CSEConfigFull ---------- /// -bool CSEConfigFull::shouldCSEOpc(unsigned Opc) { - switch (Opc) { - default: - break; - case TargetOpcode::G_ADD: - case TargetOpcode::G_AND: - case TargetOpcode::G_ASHR: - case TargetOpcode::G_LSHR: - case TargetOpcode::G_MUL: - case TargetOpcode::G_OR: - case TargetOpcode::G_SHL: - case TargetOpcode::G_SUB: - case TargetOpcode::G_XOR: - case TargetOpcode::G_UDIV: - case TargetOpcode::G_SDIV: - case TargetOpcode::G_UREM: - case TargetOpcode::G_SREM: - case TargetOpcode::G_CONSTANT: - case TargetOpcode::G_FCONSTANT: - case TargetOpcode::G_IMPLICIT_DEF: - case TargetOpcode::G_ZEXT: - case TargetOpcode::G_SEXT: - case TargetOpcode::G_ANYEXT: - case TargetOpcode::G_UNMERGE_VALUES: - case TargetOpcode::G_TRUNC: - case TargetOpcode::G_PTR_ADD: +//===- CSEInfo.cpp ------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/InitializePasses.h" + +#define DEBUG_TYPE "cseinfo" + +using namespace llvm; +char llvm::GISelCSEAnalysisWrapperPass::ID = 0; +GISelCSEAnalysisWrapperPass::GISelCSEAnalysisWrapperPass() + : MachineFunctionPass(ID) { + initializeGISelCSEAnalysisWrapperPassPass(*PassRegistry::getPassRegistry()); +} +INITIALIZE_PASS_BEGIN(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, + "Analysis containing CSE Info", false, true) +INITIALIZE_PASS_END(GISelCSEAnalysisWrapperPass, DEBUG_TYPE, + "Analysis containing CSE Info", false, true) + +/// -------- UniqueMachineInstr -------------// + +void UniqueMachineInstr::Profile(FoldingSetNodeID &ID) { + GISelInstProfileBuilder(ID, MI->getMF()->getRegInfo()).addNodeID(MI); +} +/// ----------------------------------------- + +/// --------- CSEConfigFull ---------- /// +bool CSEConfigFull::shouldCSEOpc(unsigned Opc) { + switch (Opc) { + default: + break; + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SHL: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: + case TargetOpcode::G_CONSTANT: + case TargetOpcode::G_FCONSTANT: + case TargetOpcode::G_IMPLICIT_DEF: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ANYEXT: + case TargetOpcode::G_UNMERGE_VALUES: + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_PTR_ADD: case TargetOpcode::G_EXTRACT: - return true; - } - return false; -} - -bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) { - return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_IMPLICIT_DEF; -} - -std::unique_ptr<CSEConfigBase> -llvm::getStandardCSEConfigForOpt(CodeGenOpt::Level Level) { - std::unique_ptr<CSEConfigBase> Config; - if (Level == CodeGenOpt::None) - Config = std::make_unique<CSEConfigConstantOnly>(); - else - Config = std::make_unique<CSEConfigFull>(); - return Config; -} - -/// ----------------------------------------- - -/// -------- GISelCSEInfo -------------// -void GISelCSEInfo::setMF(MachineFunction &MF) { - this->MF = &MF; - this->MRI = &MF.getRegInfo(); -} - -GISelCSEInfo::~GISelCSEInfo() {} - -bool GISelCSEInfo::isUniqueMachineInstValid( - const UniqueMachineInstr &UMI) const { - // Should we check here and assert that the instruction has been fully - // constructed? - // FIXME: Any other checks required to be done here? Remove this method if - // none. - return true; -} - -void GISelCSEInfo::invalidateUniqueMachineInstr(UniqueMachineInstr *UMI) { - bool Removed = CSEMap.RemoveNode(UMI); - (void)Removed; - assert(Removed && "Invalidation called on invalid UMI"); - // FIXME: Should UMI be deallocated/destroyed? 
-} - -UniqueMachineInstr *GISelCSEInfo::getNodeIfExists(FoldingSetNodeID &ID, - MachineBasicBlock *MBB, - void *&InsertPos) { - auto *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); - if (Node) { - if (!isUniqueMachineInstValid(*Node)) { - invalidateUniqueMachineInstr(Node); - return nullptr; - } - - if (Node->MI->getParent() != MBB) - return nullptr; - } - return Node; -} - -void GISelCSEInfo::insertNode(UniqueMachineInstr *UMI, void *InsertPos) { - handleRecordedInsts(); - assert(UMI); - UniqueMachineInstr *MaybeNewNode = UMI; - if (InsertPos) - CSEMap.InsertNode(UMI, InsertPos); - else - MaybeNewNode = CSEMap.GetOrInsertNode(UMI); - if (MaybeNewNode != UMI) { - // A similar node exists in the folding set. Let's ignore this one. - return; - } - assert(InstrMapping.count(UMI->MI) == 0 && - "This instruction should not be in the map"); - InstrMapping[UMI->MI] = MaybeNewNode; -} - -UniqueMachineInstr *GISelCSEInfo::getUniqueInstrForMI(const MachineInstr *MI) { - assert(shouldCSE(MI->getOpcode()) && "Trying to CSE an unsupported Node"); - auto *Node = new (UniqueInstrAllocator) UniqueMachineInstr(MI); - return Node; -} - -void GISelCSEInfo::insertInstr(MachineInstr *MI, void *InsertPos) { - assert(MI); - // If it exists in temporary insts, remove it. - TemporaryInsts.remove(MI); - auto *Node = getUniqueInstrForMI(MI); - insertNode(Node, InsertPos); -} - -MachineInstr *GISelCSEInfo::getMachineInstrIfExists(FoldingSetNodeID &ID, - MachineBasicBlock *MBB, - void *&InsertPos) { - handleRecordedInsts(); - if (auto *Inst = getNodeIfExists(ID, MBB, InsertPos)) { - LLVM_DEBUG(dbgs() << "CSEInfo::Found Instr " << *Inst->MI;); - return const_cast<MachineInstr *>(Inst->MI); - } - return nullptr; -} - -void GISelCSEInfo::countOpcodeHit(unsigned Opc) { -#ifndef NDEBUG - if (OpcodeHitTable.count(Opc)) - OpcodeHitTable[Opc] += 1; - else - OpcodeHitTable[Opc] = 1; -#endif - // Else do nothing. -} - -void GISelCSEInfo::recordNewInstruction(MachineInstr *MI) { - if (shouldCSE(MI->getOpcode())) { - TemporaryInsts.insert(MI); - LLVM_DEBUG(dbgs() << "CSEInfo::Recording new MI " << *MI); - } -} - -void GISelCSEInfo::handleRecordedInst(MachineInstr *MI) { - assert(shouldCSE(MI->getOpcode()) && "Invalid instruction for CSE"); - auto *UMI = InstrMapping.lookup(MI); - LLVM_DEBUG(dbgs() << "CSEInfo::Handling recorded MI " << *MI); - if (UMI) { - // Invalidate this MI. - invalidateUniqueMachineInstr(UMI); - InstrMapping.erase(MI); - } - /// Now insert the new instruction. - if (UMI) { - /// We'll reuse the same UniqueMachineInstr to avoid the new - /// allocation. - *UMI = UniqueMachineInstr(MI); - insertNode(UMI, nullptr); - } else { - /// This is a new instruction. Allocate a new UniqueMachineInstr and - /// Insert. - insertInstr(MI); - } -} - -void GISelCSEInfo::handleRemoveInst(MachineInstr *MI) { - if (auto *UMI = InstrMapping.lookup(MI)) { - invalidateUniqueMachineInstr(UMI); - InstrMapping.erase(MI); - } - TemporaryInsts.remove(MI); -} - -void GISelCSEInfo::handleRecordedInsts() { - while (!TemporaryInsts.empty()) { - auto *MI = TemporaryInsts.pop_back_val(); - handleRecordedInst(MI); - } -} - -bool GISelCSEInfo::shouldCSE(unsigned Opc) const { - assert(CSEOpt.get() && "CSEConfig not set"); - return CSEOpt->shouldCSEOpc(Opc); -} - -void GISelCSEInfo::erasingInstr(MachineInstr &MI) { handleRemoveInst(&MI); } -void GISelCSEInfo::createdInstr(MachineInstr &MI) { recordNewInstruction(&MI); } -void GISelCSEInfo::changingInstr(MachineInstr &MI) { - // For now, perform erase, followed by insert. 
- erasingInstr(MI); - createdInstr(MI); -} -void GISelCSEInfo::changedInstr(MachineInstr &MI) { changingInstr(MI); } - -void GISelCSEInfo::analyze(MachineFunction &MF) { - setMF(MF); - for (auto &MBB : MF) { - if (MBB.empty()) - continue; - for (MachineInstr &MI : MBB) { - if (!shouldCSE(MI.getOpcode())) - continue; - LLVM_DEBUG(dbgs() << "CSEInfo::Add MI: " << MI); - insertInstr(&MI); - } - } -} - -void GISelCSEInfo::releaseMemory() { - print(); - CSEMap.clear(); - InstrMapping.clear(); - UniqueInstrAllocator.Reset(); - TemporaryInsts.clear(); - CSEOpt.reset(); - MRI = nullptr; - MF = nullptr; -#ifndef NDEBUG - OpcodeHitTable.clear(); -#endif -} - -Error GISelCSEInfo::verify() { -#ifndef NDEBUG - handleRecordedInsts(); - // For each instruction in map from MI -> UMI, - // Profile(MI) and make sure UMI is found for that profile. - for (auto &It : InstrMapping) { - FoldingSetNodeID TmpID; - GISelInstProfileBuilder(TmpID, *MRI).addNodeID(It.first); - void *InsertPos; - UniqueMachineInstr *FoundNode = - CSEMap.FindNodeOrInsertPos(TmpID, InsertPos); - if (FoundNode != It.second) - return createStringError(std::errc::not_supported, - "CSEMap mismatch, InstrMapping has MIs without " - "corresponding Nodes in CSEMap"); - } - - // For every node in the CSEMap, make sure that the InstrMapping - // points to it. - for (auto It = CSEMap.begin(), End = CSEMap.end(); It != End; ++It) { - const UniqueMachineInstr &UMI = *It; - if (!InstrMapping.count(UMI.MI)) - return createStringError(std::errc::not_supported, - "Node in CSE without InstrMapping", UMI.MI); - - if (InstrMapping[UMI.MI] != &UMI) - return createStringError(std::make_error_code(std::errc::not_supported), - "Mismatch in CSE mapping"); - } -#endif - return Error::success(); -} - -void GISelCSEInfo::print() { - LLVM_DEBUG(for (auto &It - : OpcodeHitTable) { - dbgs() << "CSEInfo::CSE Hit for Opc " << It.first << " : " << It.second - << "\n"; - };); -} -/// ----------------------------------------- -// ---- Profiling methods for FoldingSetNode --- // -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeID(const MachineInstr *MI) const { - addNodeIDMBB(MI->getParent()); - addNodeIDOpcode(MI->getOpcode()); - for (auto &Op : MI->operands()) - addNodeIDMachineOperand(Op); - addNodeIDFlag(MI->getFlags()); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDOpcode(unsigned Opc) const { - ID.AddInteger(Opc); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDRegType(const LLT Ty) const { - uint64_t Val = Ty.getUniqueRAWLLTData(); - ID.AddInteger(Val); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDRegType(const TargetRegisterClass *RC) const { - ID.AddPointer(RC); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDRegType(const RegisterBank *RB) const { - ID.AddPointer(RB); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDImmediate(int64_t Imm) const { - ID.AddInteger(Imm); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDRegNum(Register Reg) const { - ID.AddInteger(Reg); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDRegType(const Register Reg) const { - addNodeIDMachineOperand(MachineOperand::CreateReg(Reg, false)); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDMBB(const MachineBasicBlock *MBB) const { - 
ID.AddPointer(MBB); - return *this; -} - -const GISelInstProfileBuilder & -GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const { - if (Flag) - ID.AddInteger(Flag); - return *this; -} - + return true; + } + return false; +} + +bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) { + return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_IMPLICIT_DEF; +} + +std::unique_ptr<CSEConfigBase> +llvm::getStandardCSEConfigForOpt(CodeGenOpt::Level Level) { + std::unique_ptr<CSEConfigBase> Config; + if (Level == CodeGenOpt::None) + Config = std::make_unique<CSEConfigConstantOnly>(); + else + Config = std::make_unique<CSEConfigFull>(); + return Config; +} + +/// ----------------------------------------- + +/// -------- GISelCSEInfo -------------// +void GISelCSEInfo::setMF(MachineFunction &MF) { + this->MF = &MF; + this->MRI = &MF.getRegInfo(); +} + +GISelCSEInfo::~GISelCSEInfo() {} + +bool GISelCSEInfo::isUniqueMachineInstValid( + const UniqueMachineInstr &UMI) const { + // Should we check here and assert that the instruction has been fully + // constructed? + // FIXME: Any other checks required to be done here? Remove this method if + // none. + return true; +} + +void GISelCSEInfo::invalidateUniqueMachineInstr(UniqueMachineInstr *UMI) { + bool Removed = CSEMap.RemoveNode(UMI); + (void)Removed; + assert(Removed && "Invalidation called on invalid UMI"); + // FIXME: Should UMI be deallocated/destroyed? +} + +UniqueMachineInstr *GISelCSEInfo::getNodeIfExists(FoldingSetNodeID &ID, + MachineBasicBlock *MBB, + void *&InsertPos) { + auto *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); + if (Node) { + if (!isUniqueMachineInstValid(*Node)) { + invalidateUniqueMachineInstr(Node); + return nullptr; + } + + if (Node->MI->getParent() != MBB) + return nullptr; + } + return Node; +} + +void GISelCSEInfo::insertNode(UniqueMachineInstr *UMI, void *InsertPos) { + handleRecordedInsts(); + assert(UMI); + UniqueMachineInstr *MaybeNewNode = UMI; + if (InsertPos) + CSEMap.InsertNode(UMI, InsertPos); + else + MaybeNewNode = CSEMap.GetOrInsertNode(UMI); + if (MaybeNewNode != UMI) { + // A similar node exists in the folding set. Let's ignore this one. + return; + } + assert(InstrMapping.count(UMI->MI) == 0 && + "This instruction should not be in the map"); + InstrMapping[UMI->MI] = MaybeNewNode; +} + +UniqueMachineInstr *GISelCSEInfo::getUniqueInstrForMI(const MachineInstr *MI) { + assert(shouldCSE(MI->getOpcode()) && "Trying to CSE an unsupported Node"); + auto *Node = new (UniqueInstrAllocator) UniqueMachineInstr(MI); + return Node; +} + +void GISelCSEInfo::insertInstr(MachineInstr *MI, void *InsertPos) { + assert(MI); + // If it exists in temporary insts, remove it. + TemporaryInsts.remove(MI); + auto *Node = getUniqueInstrForMI(MI); + insertNode(Node, InsertPos); +} + +MachineInstr *GISelCSEInfo::getMachineInstrIfExists(FoldingSetNodeID &ID, + MachineBasicBlock *MBB, + void *&InsertPos) { + handleRecordedInsts(); + if (auto *Inst = getNodeIfExists(ID, MBB, InsertPos)) { + LLVM_DEBUG(dbgs() << "CSEInfo::Found Instr " << *Inst->MI;); + return const_cast<MachineInstr *>(Inst->MI); + } + return nullptr; +} + +void GISelCSEInfo::countOpcodeHit(unsigned Opc) { +#ifndef NDEBUG + if (OpcodeHitTable.count(Opc)) + OpcodeHitTable[Opc] += 1; + else + OpcodeHitTable[Opc] = 1; +#endif + // Else do nothing. 
+} + +void GISelCSEInfo::recordNewInstruction(MachineInstr *MI) { + if (shouldCSE(MI->getOpcode())) { + TemporaryInsts.insert(MI); + LLVM_DEBUG(dbgs() << "CSEInfo::Recording new MI " << *MI); + } +} + +void GISelCSEInfo::handleRecordedInst(MachineInstr *MI) { + assert(shouldCSE(MI->getOpcode()) && "Invalid instruction for CSE"); + auto *UMI = InstrMapping.lookup(MI); + LLVM_DEBUG(dbgs() << "CSEInfo::Handling recorded MI " << *MI); + if (UMI) { + // Invalidate this MI. + invalidateUniqueMachineInstr(UMI); + InstrMapping.erase(MI); + } + /// Now insert the new instruction. + if (UMI) { + /// We'll reuse the same UniqueMachineInstr to avoid the new + /// allocation. + *UMI = UniqueMachineInstr(MI); + insertNode(UMI, nullptr); + } else { + /// This is a new instruction. Allocate a new UniqueMachineInstr and + /// Insert. + insertInstr(MI); + } +} + +void GISelCSEInfo::handleRemoveInst(MachineInstr *MI) { + if (auto *UMI = InstrMapping.lookup(MI)) { + invalidateUniqueMachineInstr(UMI); + InstrMapping.erase(MI); + } + TemporaryInsts.remove(MI); +} + +void GISelCSEInfo::handleRecordedInsts() { + while (!TemporaryInsts.empty()) { + auto *MI = TemporaryInsts.pop_back_val(); + handleRecordedInst(MI); + } +} + +bool GISelCSEInfo::shouldCSE(unsigned Opc) const { + assert(CSEOpt.get() && "CSEConfig not set"); + return CSEOpt->shouldCSEOpc(Opc); +} + +void GISelCSEInfo::erasingInstr(MachineInstr &MI) { handleRemoveInst(&MI); } +void GISelCSEInfo::createdInstr(MachineInstr &MI) { recordNewInstruction(&MI); } +void GISelCSEInfo::changingInstr(MachineInstr &MI) { + // For now, perform erase, followed by insert. + erasingInstr(MI); + createdInstr(MI); +} +void GISelCSEInfo::changedInstr(MachineInstr &MI) { changingInstr(MI); } + +void GISelCSEInfo::analyze(MachineFunction &MF) { + setMF(MF); + for (auto &MBB : MF) { + if (MBB.empty()) + continue; + for (MachineInstr &MI : MBB) { + if (!shouldCSE(MI.getOpcode())) + continue; + LLVM_DEBUG(dbgs() << "CSEInfo::Add MI: " << MI); + insertInstr(&MI); + } + } +} + +void GISelCSEInfo::releaseMemory() { + print(); + CSEMap.clear(); + InstrMapping.clear(); + UniqueInstrAllocator.Reset(); + TemporaryInsts.clear(); + CSEOpt.reset(); + MRI = nullptr; + MF = nullptr; +#ifndef NDEBUG + OpcodeHitTable.clear(); +#endif +} + +Error GISelCSEInfo::verify() { +#ifndef NDEBUG + handleRecordedInsts(); + // For each instruction in map from MI -> UMI, + // Profile(MI) and make sure UMI is found for that profile. + for (auto &It : InstrMapping) { + FoldingSetNodeID TmpID; + GISelInstProfileBuilder(TmpID, *MRI).addNodeID(It.first); + void *InsertPos; + UniqueMachineInstr *FoundNode = + CSEMap.FindNodeOrInsertPos(TmpID, InsertPos); + if (FoundNode != It.second) + return createStringError(std::errc::not_supported, + "CSEMap mismatch, InstrMapping has MIs without " + "corresponding Nodes in CSEMap"); + } + + // For every node in the CSEMap, make sure that the InstrMapping + // points to it. 
+ for (auto It = CSEMap.begin(), End = CSEMap.end(); It != End; ++It) { + const UniqueMachineInstr &UMI = *It; + if (!InstrMapping.count(UMI.MI)) + return createStringError(std::errc::not_supported, + "Node in CSE without InstrMapping", UMI.MI); + + if (InstrMapping[UMI.MI] != &UMI) + return createStringError(std::make_error_code(std::errc::not_supported), + "Mismatch in CSE mapping"); + } +#endif + return Error::success(); +} + +void GISelCSEInfo::print() { + LLVM_DEBUG(for (auto &It + : OpcodeHitTable) { + dbgs() << "CSEInfo::CSE Hit for Opc " << It.first << " : " << It.second + << "\n"; + };); +} +/// ----------------------------------------- +// ---- Profiling methods for FoldingSetNode --- // +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeID(const MachineInstr *MI) const { + addNodeIDMBB(MI->getParent()); + addNodeIDOpcode(MI->getOpcode()); + for (auto &Op : MI->operands()) + addNodeIDMachineOperand(Op); + addNodeIDFlag(MI->getFlags()); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDOpcode(unsigned Opc) const { + ID.AddInteger(Opc); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const LLT Ty) const { + uint64_t Val = Ty.getUniqueRAWLLTData(); + ID.AddInteger(Val); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const TargetRegisterClass *RC) const { + ID.AddPointer(RC); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const RegisterBank *RB) const { + ID.AddPointer(RB); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDImmediate(int64_t Imm) const { + ID.AddInteger(Imm); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegNum(Register Reg) const { + ID.AddInteger(Reg); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDRegType(const Register Reg) const { + addNodeIDMachineOperand(MachineOperand::CreateReg(Reg, false)); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDMBB(const MachineBasicBlock *MBB) const { + ID.AddPointer(MBB); + return *this; +} + +const GISelInstProfileBuilder & +GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const { + if (Flag) + ID.AddInteger(Flag); + return *this; +} + const GISelInstProfileBuilder & GISelInstProfileBuilder::addNodeIDReg(Register Reg) const { LLT Ty = MRI.getType(Reg); @@ -382,48 +382,48 @@ GISelInstProfileBuilder::addNodeIDReg(Register Reg) const { return *this; } -const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand( - const MachineOperand &MO) const { - if (MO.isReg()) { - Register Reg = MO.getReg(); - if (!MO.isDef()) - addNodeIDRegNum(Reg); - +const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand( + const MachineOperand &MO) const { + if (MO.isReg()) { + Register Reg = MO.getReg(); + if (!MO.isDef()) + addNodeIDRegNum(Reg); + // Profile the register properties. 
addNodeIDReg(Reg); - assert(!MO.isImplicit() && "Unhandled case"); - } else if (MO.isImm()) - ID.AddInteger(MO.getImm()); - else if (MO.isCImm()) - ID.AddPointer(MO.getCImm()); - else if (MO.isFPImm()) - ID.AddPointer(MO.getFPImm()); - else if (MO.isPredicate()) - ID.AddInteger(MO.getPredicate()); - else - llvm_unreachable("Unhandled operand type"); - // Handle other types - return *this; -} - -GISelCSEInfo & -GISelCSEAnalysisWrapper::get(std::unique_ptr<CSEConfigBase> CSEOpt, - bool Recompute) { - if (!AlreadyComputed || Recompute) { - Info.releaseMemory(); - Info.setCSEConfig(std::move(CSEOpt)); - Info.analyze(*MF); - AlreadyComputed = true; - } - return Info; -} -void GISelCSEAnalysisWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesAll(); - MachineFunctionPass::getAnalysisUsage(AU); -} - -bool GISelCSEAnalysisWrapperPass::runOnMachineFunction(MachineFunction &MF) { - releaseMemory(); - Wrapper.setMF(MF); - return false; -} + assert(!MO.isImplicit() && "Unhandled case"); + } else if (MO.isImm()) + ID.AddInteger(MO.getImm()); + else if (MO.isCImm()) + ID.AddPointer(MO.getCImm()); + else if (MO.isFPImm()) + ID.AddPointer(MO.getFPImm()); + else if (MO.isPredicate()) + ID.AddInteger(MO.getPredicate()); + else + llvm_unreachable("Unhandled operand type"); + // Handle other types + return *this; +} + +GISelCSEInfo & +GISelCSEAnalysisWrapper::get(std::unique_ptr<CSEConfigBase> CSEOpt, + bool Recompute) { + if (!AlreadyComputed || Recompute) { + Info.releaseMemory(); + Info.setCSEConfig(std::move(CSEOpt)); + Info.analyze(*MF); + AlreadyComputed = true; + } + return Info; +} +void GISelCSEAnalysisWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +bool GISelCSEAnalysisWrapperPass::runOnMachineFunction(MachineFunction &MF) { + releaseMemory(); + Wrapper.setMF(MF); + return false; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp index d11ed6f3d3..2c86f06a60 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -1,151 +1,151 @@ -//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.cpp - MIBuilder--*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the CSEMIRBuilder class which CSEs as it builds -/// instructions. -//===----------------------------------------------------------------------===// -// - -#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.cpp - MIBuilder--*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the CSEMIRBuilder class which CSEs as it builds +/// instructions. 
+//===----------------------------------------------------------------------===// +// + +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" #include "llvm/IR/DebugInfoMetadata.h" - -using namespace llvm; - -bool CSEMIRBuilder::dominates(MachineBasicBlock::const_iterator A, - MachineBasicBlock::const_iterator B) const { - auto MBBEnd = getMBB().end(); - if (B == MBBEnd) - return true; - assert(A->getParent() == B->getParent() && - "Iterators should be in same block"); - const MachineBasicBlock *BBA = A->getParent(); - MachineBasicBlock::const_iterator I = BBA->begin(); - for (; &*I != A && &*I != B; ++I) - ; - return &*I == A; -} - -MachineInstrBuilder -CSEMIRBuilder::getDominatingInstrForID(FoldingSetNodeID &ID, - void *&NodeInsertPos) { - GISelCSEInfo *CSEInfo = getCSEInfo(); - assert(CSEInfo && "Can't get here without setting CSEInfo"); - MachineBasicBlock *CurMBB = &getMBB(); - MachineInstr *MI = - CSEInfo->getMachineInstrIfExists(ID, CurMBB, NodeInsertPos); - if (MI) { - CSEInfo->countOpcodeHit(MI->getOpcode()); - auto CurrPos = getInsertPt(); + +using namespace llvm; + +bool CSEMIRBuilder::dominates(MachineBasicBlock::const_iterator A, + MachineBasicBlock::const_iterator B) const { + auto MBBEnd = getMBB().end(); + if (B == MBBEnd) + return true; + assert(A->getParent() == B->getParent() && + "Iterators should be in same block"); + const MachineBasicBlock *BBA = A->getParent(); + MachineBasicBlock::const_iterator I = BBA->begin(); + for (; &*I != A && &*I != B; ++I) + ; + return &*I == A; +} + +MachineInstrBuilder +CSEMIRBuilder::getDominatingInstrForID(FoldingSetNodeID &ID, + void *&NodeInsertPos) { + GISelCSEInfo *CSEInfo = getCSEInfo(); + assert(CSEInfo && "Can't get here without setting CSEInfo"); + MachineBasicBlock *CurMBB = &getMBB(); + MachineInstr *MI = + CSEInfo->getMachineInstrIfExists(ID, CurMBB, NodeInsertPos); + if (MI) { + CSEInfo->countOpcodeHit(MI->getOpcode()); + auto CurrPos = getInsertPt(); auto MII = MachineBasicBlock::iterator(MI); if (MII == CurrPos) { // Move the insert point ahead of the instruction so any future uses of // this builder will have the def ready. setInsertPt(*CurMBB, std::next(MII)); } else if (!dominates(MI, CurrPos)) { - CurMBB->splice(CurrPos, CurMBB, MI); + CurMBB->splice(CurrPos, CurMBB, MI); } - return MachineInstrBuilder(getMF(), MI); - } - return MachineInstrBuilder(); -} - -bool CSEMIRBuilder::canPerformCSEForOpc(unsigned Opc) const { - const GISelCSEInfo *CSEInfo = getCSEInfo(); - if (!CSEInfo || !CSEInfo->shouldCSE(Opc)) - return false; - return true; -} - -void CSEMIRBuilder::profileDstOp(const DstOp &Op, - GISelInstProfileBuilder &B) const { - switch (Op.getDstOpKind()) { - case DstOp::DstType::Ty_RC: - B.addNodeIDRegType(Op.getRegClass()); - break; + return MachineInstrBuilder(getMF(), MI); + } + return MachineInstrBuilder(); +} + +bool CSEMIRBuilder::canPerformCSEForOpc(unsigned Opc) const { + const GISelCSEInfo *CSEInfo = getCSEInfo(); + if (!CSEInfo || !CSEInfo->shouldCSE(Opc)) + return false; + return true; +} + +void CSEMIRBuilder::profileDstOp(const DstOp &Op, + GISelInstProfileBuilder &B) const { + switch (Op.getDstOpKind()) { + case DstOp::DstType::Ty_RC: + B.addNodeIDRegType(Op.getRegClass()); + break; case DstOp::DstType::Ty_Reg: { // Regs can have LLT&(RB|RC). If those exist, profile them as well. 
B.addNodeIDReg(Op.getReg()); break; } - default: - B.addNodeIDRegType(Op.getLLTTy(*getMRI())); - break; - } -} - -void CSEMIRBuilder::profileSrcOp(const SrcOp &Op, - GISelInstProfileBuilder &B) const { - switch (Op.getSrcOpKind()) { + default: + B.addNodeIDRegType(Op.getLLTTy(*getMRI())); + break; + } +} + +void CSEMIRBuilder::profileSrcOp(const SrcOp &Op, + GISelInstProfileBuilder &B) const { + switch (Op.getSrcOpKind()) { case SrcOp::SrcType::Ty_Imm: B.addNodeIDImmediate(static_cast<int64_t>(Op.getImm())); break; - case SrcOp::SrcType::Ty_Predicate: - B.addNodeIDImmediate(static_cast<int64_t>(Op.getPredicate())); - break; - default: - B.addNodeIDRegType(Op.getReg()); - break; - } -} - -void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B, - unsigned Opc) const { - // First add the MBB (Local CSE). - B.addNodeIDMBB(&getMBB()); - // Then add the opcode. - B.addNodeIDOpcode(Opc); -} - -void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps, - ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags, - GISelInstProfileBuilder &B) const { - - profileMBBOpcode(B, Opc); - // Then add the DstOps. - profileDstOps(DstOps, B); - // Then add the SrcOps. - profileSrcOps(SrcOps, B); - // Add Flags if passed in. - if (Flags) - B.addNodeIDFlag(*Flags); -} - -MachineInstrBuilder CSEMIRBuilder::memoizeMI(MachineInstrBuilder MIB, - void *NodeInsertPos) { - assert(canPerformCSEForOpc(MIB->getOpcode()) && - "Attempting to CSE illegal op"); - MachineInstr *MIBInstr = MIB; - getCSEInfo()->insertInstr(MIBInstr, NodeInsertPos); - return MIB; -} - -bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef<DstOp> DstOps) { - if (DstOps.size() == 1) - return true; // always possible to emit copy to just 1 vreg. - + case SrcOp::SrcType::Ty_Predicate: + B.addNodeIDImmediate(static_cast<int64_t>(Op.getPredicate())); + break; + default: + B.addNodeIDRegType(Op.getReg()); + break; + } +} + +void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B, + unsigned Opc) const { + // First add the MBB (Local CSE). + B.addNodeIDMBB(&getMBB()); + // Then add the opcode. + B.addNodeIDOpcode(Opc); +} + +void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flags, + GISelInstProfileBuilder &B) const { + + profileMBBOpcode(B, Opc); + // Then add the DstOps. + profileDstOps(DstOps, B); + // Then add the SrcOps. + profileSrcOps(SrcOps, B); + // Add Flags if passed in. + if (Flags) + B.addNodeIDFlag(*Flags); +} + +MachineInstrBuilder CSEMIRBuilder::memoizeMI(MachineInstrBuilder MIB, + void *NodeInsertPos) { + assert(canPerformCSEForOpc(MIB->getOpcode()) && + "Attempting to CSE illegal op"); + MachineInstr *MIBInstr = MIB; + getCSEInfo()->insertInstr(MIBInstr, NodeInsertPos); + return MIB; +} + +bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef<DstOp> DstOps) { + if (DstOps.size() == 1) + return true; // always possible to emit copy to just 1 vreg. 
+ return llvm::all_of(DstOps, [](const DstOp &Op) { - DstOp::DstType DT = Op.getDstOpKind(); - return DT == DstOp::DstType::Ty_LLT || DT == DstOp::DstType::Ty_RC; - }); -} - -MachineInstrBuilder -CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, - MachineInstrBuilder &MIB) { - assert(checkCopyToDefsPossible(DstOps) && - "Impossible return a single MIB with copies to multiple defs"); - if (DstOps.size() == 1) { - const DstOp &Op = DstOps[0]; - if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg) - return buildCopy(Op.getReg(), MIB.getReg(0)); - } + DstOp::DstType DT = Op.getDstOpKind(); + return DT == DstOp::DstType::Ty_LLT || DT == DstOp::DstType::Ty_RC; + }); +} + +MachineInstrBuilder +CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, + MachineInstrBuilder &MIB) { + assert(checkCopyToDefsPossible(DstOps) && + "Impossible return a single MIB with copies to multiple defs"); + if (DstOps.size() == 1) { + const DstOp &Op = DstOps[0]; + if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg) + return buildCopy(Op.getReg(), MIB.getReg(0)); + } // If we didn't generate a copy then we're re-using an existing node directly // instead of emitting any code. Merge the debug location we wanted to emit @@ -161,125 +161,125 @@ CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps, Observer->changedInstr(*MIB); } - return MIB; -} - -MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, - ArrayRef<DstOp> DstOps, - ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flag) { - switch (Opc) { - default: - break; - case TargetOpcode::G_ADD: - case TargetOpcode::G_AND: - case TargetOpcode::G_ASHR: - case TargetOpcode::G_LSHR: - case TargetOpcode::G_MUL: - case TargetOpcode::G_OR: - case TargetOpcode::G_SHL: - case TargetOpcode::G_SUB: - case TargetOpcode::G_XOR: - case TargetOpcode::G_UDIV: - case TargetOpcode::G_SDIV: - case TargetOpcode::G_UREM: - case TargetOpcode::G_SREM: { - // Try to constant fold these. - assert(SrcOps.size() == 2 && "Invalid sources"); - assert(DstOps.size() == 1 && "Invalid dsts"); - if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(), - SrcOps[1].getReg(), *getMRI())) - return buildConstant(DstOps[0], Cst->getSExtValue()); - break; - } - case TargetOpcode::G_SEXT_INREG: { - assert(DstOps.size() == 1 && "Invalid dst ops"); - assert(SrcOps.size() == 2 && "Invalid src ops"); - const DstOp &Dst = DstOps[0]; - const SrcOp &Src0 = SrcOps[0]; - const SrcOp &Src1 = SrcOps[1]; - if (auto MaybeCst = - ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI())) - return buildConstant(Dst, MaybeCst->getSExtValue()); - break; - } - } - bool CanCopy = checkCopyToDefsPossible(DstOps); - if (!canPerformCSEForOpc(Opc)) - return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); - // If we can CSE this instruction, but involves generating copies to multiple - // regs, give up. This frequently happens to UNMERGEs. - if (!CanCopy) { - auto MIB = MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); - // CSEInfo would have tracked this instruction. Remove it from the temporary - // insts. - getCSEInfo()->handleRemoveInst(&*MIB); - return MIB; - } - FoldingSetNodeID ID; - GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); - void *InsertPos = nullptr; - profileEverything(Opc, DstOps, SrcOps, Flag, ProfBuilder); - MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); - if (MIB) { - // Handle generating copies here. - return generateCopiesIfRequired(DstOps, MIB); - } - // This instruction does not exist in the CSEInfo. Build it and CSE it. 
- MachineInstrBuilder NewMIB = - MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); - return memoizeMI(NewMIB, InsertPos); -} - -MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res, - const ConstantInt &Val) { - constexpr unsigned Opc = TargetOpcode::G_CONSTANT; - if (!canPerformCSEForOpc(Opc)) - return MachineIRBuilder::buildConstant(Res, Val); - - // For vectors, CSE the element only for now. - LLT Ty = Res.getLLTTy(*getMRI()); - if (Ty.isVector()) - return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val)); - - FoldingSetNodeID ID; - GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); - void *InsertPos = nullptr; - profileMBBOpcode(ProfBuilder, Opc); - profileDstOp(Res, ProfBuilder); - ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateCImm(&Val)); - MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); - if (MIB) { - // Handle generating copies here. - return generateCopiesIfRequired({Res}, MIB); - } - - MachineInstrBuilder NewMIB = MachineIRBuilder::buildConstant(Res, Val); - return memoizeMI(NewMIB, InsertPos); -} - -MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res, - const ConstantFP &Val) { - constexpr unsigned Opc = TargetOpcode::G_FCONSTANT; - if (!canPerformCSEForOpc(Opc)) - return MachineIRBuilder::buildFConstant(Res, Val); - - // For vectors, CSE the element only for now. - LLT Ty = Res.getLLTTy(*getMRI()); - if (Ty.isVector()) - return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val)); - - FoldingSetNodeID ID; - GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); - void *InsertPos = nullptr; - profileMBBOpcode(ProfBuilder, Opc); - profileDstOp(Res, ProfBuilder); - ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateFPImm(&Val)); - MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); - if (MIB) { - // Handle generating copies here. - return generateCopiesIfRequired({Res}, MIB); - } - MachineInstrBuilder NewMIB = MachineIRBuilder::buildFConstant(Res, Val); - return memoizeMI(NewMIB, InsertPos); -} + return MIB; +} + +MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc, + ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flag) { + switch (Opc) { + default: + break; + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SHL: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: { + // Try to constant fold these. 
+ assert(SrcOps.size() == 2 && "Invalid sources"); + assert(DstOps.size() == 1 && "Invalid dsts"); + if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(), + SrcOps[1].getReg(), *getMRI())) + return buildConstant(DstOps[0], Cst->getSExtValue()); + break; + } + case TargetOpcode::G_SEXT_INREG: { + assert(DstOps.size() == 1 && "Invalid dst ops"); + assert(SrcOps.size() == 2 && "Invalid src ops"); + const DstOp &Dst = DstOps[0]; + const SrcOp &Src0 = SrcOps[0]; + const SrcOp &Src1 = SrcOps[1]; + if (auto MaybeCst = + ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI())) + return buildConstant(Dst, MaybeCst->getSExtValue()); + break; + } + } + bool CanCopy = checkCopyToDefsPossible(DstOps); + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + // If we can CSE this instruction, but involves generating copies to multiple + // regs, give up. This frequently happens to UNMERGEs. + if (!CanCopy) { + auto MIB = MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + // CSEInfo would have tracked this instruction. Remove it from the temporary + // insts. + getCSEInfo()->handleRemoveInst(&*MIB); + return MIB; + } + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileEverything(Opc, DstOps, SrcOps, Flag, ProfBuilder); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. + return generateCopiesIfRequired(DstOps, MIB); + } + // This instruction does not exist in the CSEInfo. Build it and CSE it. + MachineInstrBuilder NewMIB = + MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag); + return memoizeMI(NewMIB, InsertPos); +} + +MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res, + const ConstantInt &Val) { + constexpr unsigned Opc = TargetOpcode::G_CONSTANT; + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildConstant(Res, Val); + + // For vectors, CSE the element only for now. + LLT Ty = Res.getLLTTy(*getMRI()); + if (Ty.isVector()) + return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val)); + + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileMBBOpcode(ProfBuilder, Opc); + profileDstOp(Res, ProfBuilder); + ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateCImm(&Val)); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. + return generateCopiesIfRequired({Res}, MIB); + } + + MachineInstrBuilder NewMIB = MachineIRBuilder::buildConstant(Res, Val); + return memoizeMI(NewMIB, InsertPos); +} + +MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res, + const ConstantFP &Val) { + constexpr unsigned Opc = TargetOpcode::G_FCONSTANT; + if (!canPerformCSEForOpc(Opc)) + return MachineIRBuilder::buildFConstant(Res, Val); + + // For vectors, CSE the element only for now. + LLT Ty = Res.getLLTTy(*getMRI()); + if (Ty.isVector()) + return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val)); + + FoldingSetNodeID ID; + GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); + void *InsertPos = nullptr; + profileMBBOpcode(ProfBuilder, Opc); + profileDstOp(Res, ProfBuilder); + ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateFPImm(&Val)); + MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos); + if (MIB) { + // Handle generating copies here. 
+ return generateCopiesIfRequired({Res}, MIB); + } + MachineInstrBuilder NewMIB = MachineIRBuilder::buildFConstant(Res, Val); + return memoizeMI(NewMIB, InsertPos); +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CallLowering.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CallLowering.cpp index de0aa2e41e..803e1527a4 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CallLowering.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CallLowering.cpp @@ -1,35 +1,35 @@ -//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// This file implements some simple delegations needed for call lowering. -/// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/Analysis.h" -#include "llvm/CodeGen/GlobalISel/CallLowering.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/IR/Instructions.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Module.h" -#include "llvm/Target/TargetMachine.h" - -#define DEBUG_TYPE "call-lowering" - -using namespace llvm; - -void CallLowering::anchor() {} - +//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file implements some simple delegations needed for call lowering. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/Analysis.h" +#include "llvm/CodeGen/GlobalISel/CallLowering.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Target/TargetMachine.h" + +#define DEBUG_TYPE "call-lowering" + +using namespace llvm; + +void CallLowering::anchor() {} + /// Helper function which updates \p Flags when \p AttrFn returns true. 
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, @@ -75,20 +75,20 @@ void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, }); } -bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, - ArrayRef<Register> ResRegs, - ArrayRef<ArrayRef<Register>> ArgRegs, - Register SwiftErrorVReg, - std::function<unsigned()> GetCalleeReg) const { - CallLoweringInfo Info; - const DataLayout &DL = MIRBuilder.getDataLayout(); +bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, + ArrayRef<Register> ResRegs, + ArrayRef<ArrayRef<Register>> ArgRegs, + Register SwiftErrorVReg, + std::function<unsigned()> GetCalleeReg) const { + CallLoweringInfo Info; + const DataLayout &DL = MIRBuilder.getDataLayout(); MachineFunction &MF = MIRBuilder.getMF(); bool CanBeTailCalled = CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) && (MF.getFunction() .getFnAttribute("disable-tail-calls") .getValueAsString() != "true"); - + CallingConv::ID CallConv = CB.getCallingConv(); Type *RetTy = CB.getType(); bool IsVarArg = CB.getFunctionType()->isVarArg(); @@ -106,154 +106,154 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB, CanBeTailCalled = false; } - // First step is to marshall all the function's parameters into the correct - // physregs and memory locations. Gather the sequence of argument types that - // we'll pass to the assigner function. - unsigned i = 0; - unsigned NumFixedArgs = CB.getFunctionType()->getNumParams(); - for (auto &Arg : CB.args()) { + // First step is to marshall all the function's parameters into the correct + // physregs and memory locations. Gather the sequence of argument types that + // we'll pass to the assigner function. + unsigned i = 0; + unsigned NumFixedArgs = CB.getFunctionType()->getNumParams(); + for (auto &Arg : CB.args()) { ArgInfo OrigArg{ArgRegs[i], Arg->getType(), getAttributesForArgIdx(CB, i), - i < NumFixedArgs}; - setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB); + i < NumFixedArgs}; + setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB); // If we have an explicit sret argument that is an Instruction, (i.e., it // might point to function-local memory), we can't meaningfully tail-call. if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg)) CanBeTailCalled = false; - Info.OrigArgs.push_back(OrigArg); - ++i; - } - - // Try looking through a bitcast from one function type to another. - // Commonly happens with calls to objc_msgSend(). - const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts(); - if (const Function *F = dyn_cast<Function>(CalleeV)) - Info.Callee = MachineOperand::CreateGA(F, 0); - else - Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false); - + Info.OrigArgs.push_back(OrigArg); + ++i; + } + + // Try looking through a bitcast from one function type to another. + // Commonly happens with calls to objc_msgSend(). 
+ const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts(); + if (const Function *F = dyn_cast<Function>(CalleeV)) + Info.Callee = MachineOperand::CreateGA(F, 0); + else + Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false); + Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}}; - if (!Info.OrigRet.Ty->isVoidTy()) - setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB); - - Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); + if (!Info.OrigRet.Ty->isVoidTy()) + setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB); + + Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees); Info.CallConv = CallConv; - Info.SwiftErrorVReg = SwiftErrorVReg; - Info.IsMustTailCall = CB.isMustTailCall(); + Info.SwiftErrorVReg = SwiftErrorVReg; + Info.IsMustTailCall = CB.isMustTailCall(); Info.IsTailCall = CanBeTailCalled; Info.IsVarArg = IsVarArg; - return lowerCall(MIRBuilder, Info); -} - -template <typename FuncInfoTy> -void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx, - const DataLayout &DL, - const FuncInfoTy &FuncInfo) const { - auto &Flags = Arg.Flags[0]; - const AttributeList &Attrs = FuncInfo.getAttributes(); + return lowerCall(MIRBuilder, Info); +} + +template <typename FuncInfoTy> +void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const FuncInfoTy &FuncInfo) const { + auto &Flags = Arg.Flags[0]; + const AttributeList &Attrs = FuncInfo.getAttributes(); addArgFlagsFromAttributes(Flags, Attrs, OpIdx); - - if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) { - Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType(); - - auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType(); - Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy)); - - // For ByVal, alignment should be passed from FE. BE will guess if - // this info is not there but there are cases it cannot get right. 
- Align FrameAlign; - if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2)) - FrameAlign = *ParamAlign; - else - FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL)); - Flags.setByValAlign(FrameAlign); - } - Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty)); -} - -template void -CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx, - const DataLayout &DL, - const Function &FuncInfo) const; - -template void -CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx, - const DataLayout &DL, - const CallBase &FuncInfo) const; - -Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy, - MachineIRBuilder &MIRBuilder) const { - assert(SrcRegs.size() > 1 && "Nothing to pack"); - - const DataLayout &DL = MIRBuilder.getMF().getDataLayout(); - MachineRegisterInfo *MRI = MIRBuilder.getMRI(); - - LLT PackedLLT = getLLTForType(*PackedTy, DL); - - SmallVector<LLT, 8> LLTs; - SmallVector<uint64_t, 8> Offsets; - computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); - assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch"); - - Register Dst = MRI->createGenericVirtualRegister(PackedLLT); - MIRBuilder.buildUndef(Dst); - for (unsigned i = 0; i < SrcRegs.size(); ++i) { - Register NewDst = MRI->createGenericVirtualRegister(PackedLLT); - MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]); - Dst = NewDst; - } - - return Dst; -} - -void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, - Type *PackedTy, - MachineIRBuilder &MIRBuilder) const { - assert(DstRegs.size() > 1 && "Nothing to unpack"); - - const DataLayout &DL = MIRBuilder.getDataLayout(); - - SmallVector<LLT, 8> LLTs; - SmallVector<uint64_t, 8> Offsets; - computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); - assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch"); - - for (unsigned i = 0; i < DstRegs.size(); ++i) - MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]); -} - -bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder, - SmallVectorImpl<ArgInfo> &Args, - ValueHandler &Handler) const { - MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = MF.getFunction(); - SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); - return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler); -} - -bool CallLowering::handleAssignments(CCState &CCInfo, - SmallVectorImpl<CCValAssign> &ArgLocs, - MachineIRBuilder &MIRBuilder, - SmallVectorImpl<ArgInfo> &Args, - ValueHandler &Handler) const { - MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = MF.getFunction(); - const DataLayout &DL = F.getParent()->getDataLayout(); - - unsigned NumArgs = Args.size(); - for (unsigned i = 0; i != NumArgs; ++i) { - EVT CurVT = EVT::getEVT(Args[i].Ty); + + if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) { + Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType(); + + auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType(); + Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy)); + + // For ByVal, alignment should be passed from FE. BE will guess if + // this info is not there but there are cases it cannot get right. 
+ Align FrameAlign; + if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2)) + FrameAlign = *ParamAlign; + else + FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL)); + Flags.setByValAlign(FrameAlign); + } + Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty)); +} + +template void +CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const Function &FuncInfo) const; + +template void +CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx, + const DataLayout &DL, + const CallBase &FuncInfo) const; + +Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy, + MachineIRBuilder &MIRBuilder) const { + assert(SrcRegs.size() > 1 && "Nothing to pack"); + + const DataLayout &DL = MIRBuilder.getMF().getDataLayout(); + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + + LLT PackedLLT = getLLTForType(*PackedTy, DL); + + SmallVector<LLT, 8> LLTs; + SmallVector<uint64_t, 8> Offsets; + computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); + assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch"); + + Register Dst = MRI->createGenericVirtualRegister(PackedLLT); + MIRBuilder.buildUndef(Dst); + for (unsigned i = 0; i < SrcRegs.size(); ++i) { + Register NewDst = MRI->createGenericVirtualRegister(PackedLLT); + MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]); + Dst = NewDst; + } + + return Dst; +} + +void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, + Type *PackedTy, + MachineIRBuilder &MIRBuilder) const { + assert(DstRegs.size() > 1 && "Nothing to unpack"); + + const DataLayout &DL = MIRBuilder.getDataLayout(); + + SmallVector<LLT, 8> LLTs; + SmallVector<uint64_t, 8> Offsets; + computeValueLLTs(DL, *PackedTy, LLTs, &Offsets); + assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch"); + + for (unsigned i = 0; i < DstRegs.size(); ++i) + MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]); +} + +bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder, + SmallVectorImpl<ArgInfo> &Args, + ValueHandler &Handler) const { + MachineFunction &MF = MIRBuilder.getMF(); + const Function &F = MF.getFunction(); + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); + return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler); +} + +bool CallLowering::handleAssignments(CCState &CCInfo, + SmallVectorImpl<CCValAssign> &ArgLocs, + MachineIRBuilder &MIRBuilder, + SmallVectorImpl<ArgInfo> &Args, + ValueHandler &Handler) const { + MachineFunction &MF = MIRBuilder.getMF(); + const Function &F = MF.getFunction(); + const DataLayout &DL = F.getParent()->getDataLayout(); + + unsigned NumArgs = Args.size(); + for (unsigned i = 0; i != NumArgs; ++i) { + EVT CurVT = EVT::getEVT(Args[i].Ty); if (CurVT.isSimple() && !Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(), CCValAssign::Full, Args[i], Args[i].Flags[0], CCInfo)) continue; - + MVT NewVT = TLI->getRegisterTypeForCallingConv( F.getContext(), F.getCallingConv(), EVT(CurVT)); - + // If we need to split the type over multiple regs, check it's a scenario // we currently support. 
unsigned NumParts = TLI->getNumRegistersForCallingConv( @@ -297,19 +297,19 @@ bool CallLowering::handleAssignments(CCState &CCInfo, ISD::ArgFlagsTy Flags = OrigFlags; if (Part == 0) { Flags.setSplit(); - } else { + } else { Flags.setOrigAlign(Align(1)); if (Part == NumParts - 1) Flags.setSplitEnd(); - } + } Args[i].Regs.push_back(Reg); Args[i].Flags.push_back(Flags); if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i], Args[i].Flags[Part], CCInfo)) { // Still couldn't assign this smaller part type for some reason. return false; - } - } + } + } } else { // This type is passed via multiple registers in the calling convention. // We need to extract the individual parts. @@ -336,35 +336,35 @@ bool CallLowering::handleAssignments(CCState &CCInfo, Args[i], Args[i].Flags[PartIdx], CCInfo)) return false; } - } - } - - for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) { - assert(j < ArgLocs.size() && "Skipped too many arg locs"); - - CCValAssign &VA = ArgLocs[j]; - assert(VA.getValNo() == i && "Location doesn't correspond to current arg"); - - if (VA.needsCustom()) { - unsigned NumArgRegs = - Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j)); - if (!NumArgRegs) - return false; - j += NumArgRegs; - continue; - } - - // FIXME: Pack registers if we have more than one. - Register ArgReg = Args[i].Regs[0]; - - EVT OrigVT = EVT::getEVT(Args[i].Ty); - EVT VAVT = VA.getValVT(); - const LLT OrigTy = getLLTForType(*Args[i].Ty, DL); - + } + } + + for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) { + assert(j < ArgLocs.size() && "Skipped too many arg locs"); + + CCValAssign &VA = ArgLocs[j]; + assert(VA.getValNo() == i && "Location doesn't correspond to current arg"); + + if (VA.needsCustom()) { + unsigned NumArgRegs = + Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j)); + if (!NumArgRegs) + return false; + j += NumArgRegs; + continue; + } + + // FIXME: Pack registers if we have more than one. + Register ArgReg = Args[i].Regs[0]; + + EVT OrigVT = EVT::getEVT(Args[i].Ty); + EVT VAVT = VA.getValVT(); + const LLT OrigTy = getLLTForType(*Args[i].Ty, DL); + // Expected to be multiple regs for a single incoming arg. // There should be Regs.size() ArgLocs per argument. unsigned NumArgRegs = Args[i].Regs.size(); - + assert((j + (NumArgRegs - 1)) < ArgLocs.size() && "Too many regs for number of args"); for (unsigned Part = 0; Part < NumArgRegs; ++Part) { @@ -378,7 +378,7 @@ bool CallLowering::handleAssignments(CCState &CCInfo, dbgs() << "Load/store a split arg to/from the stack not implemented yet\n"); return false; - } + } // FIXME: Use correct address space for pointer size EVT LocVT = VA.getValVT(); @@ -420,12 +420,12 @@ bool CallLowering::handleAssignments(CCState &CCInfo, LLVM_DEBUG(dbgs() << "Incoming promoted vector arg has too many elts"); return false; - } + } auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg}); MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0)); - } else { + } else { MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0); - } + } } // Now that all pieces have been handled, re-pack any arguments into any @@ -437,12 +437,12 @@ bool CallLowering::handleAssignments(CCState &CCInfo, // Merge the split registers into the expected larger result vreg // of the original call. 
MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs); - } + } } - + j += NumArgRegs - 1; } - + return true; } @@ -555,11 +555,11 @@ bool CallLowering::checkReturn(CCState &CCInfo, for (unsigned I = 0, E = Outs.size(); I < E; ++I) { MVT VT = MVT::getVT(Outs[I].Ty); if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo)) - return false; - } - return true; -} - + return false; + } + return true; +} + void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl<BaseArgInfo> &Outs, @@ -594,23 +594,23 @@ bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const { return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg()); } -bool CallLowering::analyzeArgInfo(CCState &CCState, - SmallVectorImpl<ArgInfo> &Args, - CCAssignFn &AssignFnFixed, - CCAssignFn &AssignFnVarArg) const { - for (unsigned i = 0, e = Args.size(); i < e; ++i) { - MVT VT = MVT::getVT(Args[i].Ty); - CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg; - if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) { - // Bail out on anything we can't handle. - LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString() - << " (arg number = " << i << "\n"); - return false; - } - } - return true; -} - +bool CallLowering::analyzeArgInfo(CCState &CCState, + SmallVectorImpl<ArgInfo> &Args, + CCAssignFn &AssignFnFixed, + CCAssignFn &AssignFnVarArg) const { + for (unsigned i = 0, e = Args.size(); i < e; ++i) { + MVT VT = MVT::getVT(Args[i].Ty); + CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg; + if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) { + // Bail out on anything we can't handle. + LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString() + << " (arg number = " << i << "\n"); + return false; + } + } + return true; +} + bool CallLowering::parametersInCSRMatch( const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl<CCValAssign> &OutLocs, @@ -663,101 +663,101 @@ bool CallLowering::parametersInCSRMatch( return true; } -bool CallLowering::resultsCompatible(CallLoweringInfo &Info, - MachineFunction &MF, - SmallVectorImpl<ArgInfo> &InArgs, - CCAssignFn &CalleeAssignFnFixed, - CCAssignFn &CalleeAssignFnVarArg, - CCAssignFn &CallerAssignFnFixed, - CCAssignFn &CallerAssignFnVarArg) const { - const Function &F = MF.getFunction(); - CallingConv::ID CalleeCC = Info.CallConv; - CallingConv::ID CallerCC = F.getCallingConv(); - - if (CallerCC == CalleeCC) - return true; - - SmallVector<CCValAssign, 16> ArgLocs1; - CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext()); - if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed, - CalleeAssignFnVarArg)) - return false; - - SmallVector<CCValAssign, 16> ArgLocs2; - CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext()); - if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed, - CalleeAssignFnVarArg)) - return false; - - // We need the argument locations to match up exactly. If there's more in - // one than the other, then we are done. - if (ArgLocs1.size() != ArgLocs2.size()) - return false; - - // Make sure that each location is passed in exactly the same way. - for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) { - const CCValAssign &Loc1 = ArgLocs1[i]; - const CCValAssign &Loc2 = ArgLocs2[i]; - - // We need both of them to be the same. So if one is a register and one - // isn't, we're done. 
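// A minimal standalone sketch of the location-matching loop in
// resultsCompatible: two argument-location lists are compatible only if they
// agree pairwise on reg-vs-mem, on the exact register, or on the exact stack
// offset. Loc is a hypothetical stand-in for CCValAssign, illustration only.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Loc {
  bool IsReg;
  unsigned Reg;      // valid when IsReg
  int64_t MemOffset; // valid when !IsReg
};

bool locationsMatch(const std::vector<Loc> &A, const std::vector<Loc> &B) {
  if (A.size() != B.size())
    return false; // more locations on one side: incompatible immediately
  for (std::size_t I = 0; I != A.size(); ++I) {
    if (A[I].IsReg != B[I].IsReg)
      return false; // one in a register, one in memory
    if (A[I].IsReg ? A[I].Reg != B[I].Reg
                   : A[I].MemOffset != B[I].MemOffset)
      return false;
  }
  return true;
}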
- if (Loc1.isRegLoc() != Loc2.isRegLoc()) - return false; - - if (Loc1.isRegLoc()) { - // If they don't have the same register location, we're done. - if (Loc1.getLocReg() != Loc2.getLocReg()) - return false; - - // They matched, so we can move to the next ArgLoc. - continue; - } - - // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match. - if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset()) - return false; - } - - return true; -} - -Register CallLowering::ValueHandler::extendRegister(Register ValReg, - CCValAssign &VA, - unsigned MaxSizeBits) { - LLT LocTy{VA.getLocVT()}; - LLT ValTy = MRI.getType(ValReg); - if (LocTy.getSizeInBits() == ValTy.getSizeInBits()) - return ValReg; - - if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) { - if (MaxSizeBits <= ValTy.getSizeInBits()) - return ValReg; - LocTy = LLT::scalar(MaxSizeBits); - } - - switch (VA.getLocInfo()) { - default: break; - case CCValAssign::Full: - case CCValAssign::BCvt: - // FIXME: bitconverting between vector types may or may not be a - // nop in big-endian situations. - return ValReg; - case CCValAssign::AExt: { - auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg); - return MIB.getReg(0); - } - case CCValAssign::SExt: { - Register NewReg = MRI.createGenericVirtualRegister(LocTy); - MIRBuilder.buildSExt(NewReg, ValReg); - return NewReg; - } - case CCValAssign::ZExt: { - Register NewReg = MRI.createGenericVirtualRegister(LocTy); - MIRBuilder.buildZExt(NewReg, ValReg); - return NewReg; - } - } - llvm_unreachable("unable to extend register"); -} - -void CallLowering::ValueHandler::anchor() {} +bool CallLowering::resultsCompatible(CallLoweringInfo &Info, + MachineFunction &MF, + SmallVectorImpl<ArgInfo> &InArgs, + CCAssignFn &CalleeAssignFnFixed, + CCAssignFn &CalleeAssignFnVarArg, + CCAssignFn &CallerAssignFnFixed, + CCAssignFn &CallerAssignFnVarArg) const { + const Function &F = MF.getFunction(); + CallingConv::ID CalleeCC = Info.CallConv; + CallingConv::ID CallerCC = F.getCallingConv(); + + if (CallerCC == CalleeCC) + return true; + + SmallVector<CCValAssign, 16> ArgLocs1; + CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext()); + if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed, + CalleeAssignFnVarArg)) + return false; + + SmallVector<CCValAssign, 16> ArgLocs2; + CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext()); + if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed, + CalleeAssignFnVarArg)) + return false; + + // We need the argument locations to match up exactly. If there's more in + // one than the other, then we are done. + if (ArgLocs1.size() != ArgLocs2.size()) + return false; + + // Make sure that each location is passed in exactly the same way. + for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) { + const CCValAssign &Loc1 = ArgLocs1[i]; + const CCValAssign &Loc2 = ArgLocs2[i]; + + // We need both of them to be the same. So if one is a register and one + // isn't, we're done. + if (Loc1.isRegLoc() != Loc2.isRegLoc()) + return false; + + if (Loc1.isRegLoc()) { + // If they don't have the same register location, we're done. + if (Loc1.getLocReg() != Loc2.getLocReg()) + return false; + + // They matched, so we can move to the next ArgLoc. + continue; + } + + // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match. 
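// A toy value-level model of the extendRegister dispatch nearby: how a value
// widens to its location type according to CCValAssign::LocInfo. Standalone
// sketch only (assumes 1 <= FromBits < 64); the real code emits
// G_ANYEXT/G_SEXT/G_ZEXT instructions rather than computing values.
#include <cstdint>

enum class LocInfo { Full, BCvt, AExt, SExt, ZExt };

int64_t extendTo(int64_t Val, unsigned FromBits, LocInfo Kind) {
  const int64_t Mask = (int64_t(1) << FromBits) - 1;
  switch (Kind) {
  case LocInfo::Full:
  case LocInfo::BCvt:
    return Val; // already the right width (or a bitcast): pass through
  case LocInfo::AExt:
    return Val & Mask; // upper bits unspecified; zeros are one valid choice
  case LocInfo::ZExt:
    return Val & Mask; // upper bits forced to zero
  case LocInfo::SExt: {
    const int64_t Sign = int64_t(1) << (FromBits - 1);
    return ((Val & Mask) ^ Sign) - Sign; // replicate the sign bit upward
  }
  }
  return Val; // unreachable with the cases above
}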
+ if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset()) + return false; + } + + return true; +} + +Register CallLowering::ValueHandler::extendRegister(Register ValReg, + CCValAssign &VA, + unsigned MaxSizeBits) { + LLT LocTy{VA.getLocVT()}; + LLT ValTy = MRI.getType(ValReg); + if (LocTy.getSizeInBits() == ValTy.getSizeInBits()) + return ValReg; + + if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) { + if (MaxSizeBits <= ValTy.getSizeInBits()) + return ValReg; + LocTy = LLT::scalar(MaxSizeBits); + } + + switch (VA.getLocInfo()) { + default: break; + case CCValAssign::Full: + case CCValAssign::BCvt: + // FIXME: bitconverting between vector types may or may not be a + // nop in big-endian situations. + return ValReg; + case CCValAssign::AExt: { + auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg); + return MIB.getReg(0); + } + case CCValAssign::SExt: { + Register NewReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildSExt(NewReg, ValReg); + return NewReg; + } + case CCValAssign::ZExt: { + Register NewReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildZExt(NewReg, ValReg); + return NewReg; + } + } + llvm_unreachable("unable to extend register"); +} + +void CallLowering::ValueHandler::anchor() {} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Combiner.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Combiner.cpp index ce54b4c151..f1071d96e5 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Combiner.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Combiner.cpp @@ -1,160 +1,160 @@ -//===-- lib/CodeGen/GlobalISel/Combiner.cpp -------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file constains common code to combine machine functions at generic -// level. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/Combiner.h" -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/CodeGen/GlobalISel/CSEInfo.h" -#include "llvm/CodeGen/GlobalISel/CombinerInfo.h" -#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/GlobalISel/GISelWorkList.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "gi-combiner" - -using namespace llvm; - -namespace llvm { -cl::OptionCategory GICombinerOptionCategory( - "GlobalISel Combiner", - "Control the rules which are enabled. These options all take a comma " - "separated list of rules to disable and may be specified by number " - "or number range (e.g. 1-10)." -#ifndef NDEBUG - " They may also be specified by name." -#endif -); -} // end namespace llvm - -namespace { -/// This class acts as the glue the joins the CombinerHelper to the overall -/// Combine algorithm. The CombinerHelper is intended to report the -/// modifications it makes to the MIR to the GISelChangeObserver and the -/// observer subclass will act on these events. 
In this case, instruction -/// erasure will cancel any future visits to the erased instruction and -/// instruction creation will schedule that instruction for a future visit. -/// Other Combiner implementations may require more complex behaviour from -/// their GISelChangeObserver subclass. -class WorkListMaintainer : public GISelChangeObserver { - using WorkListTy = GISelWorkList<512>; - WorkListTy &WorkList; - /// The instructions that have been created but we want to report once they - /// have their operands. This is only maintained if debug output is requested. - SmallPtrSet<const MachineInstr *, 4> CreatedInstrs; - -public: - WorkListMaintainer(WorkListTy &WorkList) - : GISelChangeObserver(), WorkList(WorkList) {} - virtual ~WorkListMaintainer() { - } - - void erasingInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << "Erasing: " << MI << "\n"); - WorkList.remove(&MI); - } - void createdInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << "Creating: " << MI << "\n"); - WorkList.insert(&MI); - LLVM_DEBUG(CreatedInstrs.insert(&MI)); - } - void changingInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << "Changing: " << MI << "\n"); - WorkList.insert(&MI); - } - void changedInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << "Changed: " << MI << "\n"); - WorkList.insert(&MI); - } - - void reportFullyCreatedInstrs() { - LLVM_DEBUG(for (const auto *MI - : CreatedInstrs) { - dbgs() << "Created: "; - MI->print(dbgs()); - }); - LLVM_DEBUG(CreatedInstrs.clear()); - } -}; -} - -Combiner::Combiner(CombinerInfo &Info, const TargetPassConfig *TPC) - : CInfo(Info), TPC(TPC) { - (void)this->TPC; // FIXME: Remove when used. -} - -bool Combiner::combineMachineInstrs(MachineFunction &MF, - GISelCSEInfo *CSEInfo) { - // If the ISel pipeline failed, do not bother running this pass. - // FIXME: Should this be here or in individual combiner passes. - if (MF.getProperties().hasProperty( - MachineFunctionProperties::Property::FailedISel)) - return false; - - Builder = - CSEInfo ? std::make_unique<CSEMIRBuilder>() : std::make_unique<MachineIRBuilder>(); - MRI = &MF.getRegInfo(); - Builder->setMF(MF); - if (CSEInfo) - Builder->setCSEInfo(CSEInfo); - - LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n'); - - MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); - - bool MFChanged = false; - bool Changed; - MachineIRBuilder &B = *Builder.get(); - - do { - // Collect all instructions. Do a post order traversal for basic blocks and - // insert with list bottom up, so while we pop_back_val, we'll traverse top - // down RPOT. - Changed = false; - GISelWorkList<512> WorkList; - WorkListMaintainer Observer(WorkList); - GISelObserverWrapper WrapperObserver(&Observer); - if (CSEInfo) - WrapperObserver.addObserver(CSEInfo); - RAIIDelegateInstaller DelInstall(MF, &WrapperObserver); - for (MachineBasicBlock *MBB : post_order(&MF)) { - for (auto MII = MBB->rbegin(), MIE = MBB->rend(); MII != MIE;) { - MachineInstr *CurMI = &*MII; - ++MII; - // Erase dead insts before even adding to the list. - if (isTriviallyDead(*CurMI, *MRI)) { - LLVM_DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n"); - CurMI->eraseFromParentAndMarkDBGValuesForRemoval(); - continue; - } - WorkList.deferred_insert(CurMI); - } - } - WorkList.finalize(); - // Main Loop. Process the instructions here. 
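// A minimal model of the iterate-to-fixpoint driver used here: pop
// instructions off a worklist and let the combine callback mutate the MIR and
// refill the list. Instr stands in for MachineInstr and std::deque for
// GISelWorkList; this sketches the control flow only.
#include <deque>
#include <functional>

struct Instr { unsigned Opcode; };

bool runCombineLoop(std::deque<Instr *> &WorkList,
                    const std::function<bool(Instr &)> &Combine) {
  bool Changed = false;
  while (!WorkList.empty()) {
    Instr *I = WorkList.back();
    WorkList.pop_back();    // LIFO pop walks the post-order collection top-down
    Changed |= Combine(*I); // may push newly created instructions back on
  }
  return Changed; // caller re-collects and repeats while anything changed
}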
- while (!WorkList.empty()) { - MachineInstr *CurrInst = WorkList.pop_back_val(); - LLVM_DEBUG(dbgs() << "\nTry combining " << *CurrInst;); - Changed |= CInfo.combine(WrapperObserver, *CurrInst, B); - Observer.reportFullyCreatedInstrs(); - } - MFChanged |= Changed; - } while (Changed); - +//===-- lib/CodeGen/GlobalISel/Combiner.cpp -------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file constains common code to combine machine functions at generic +// level. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/Combiner.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/GlobalISel/CombinerInfo.h" +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/GISelWorkList.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/Debug.h" + +#define DEBUG_TYPE "gi-combiner" + +using namespace llvm; + +namespace llvm { +cl::OptionCategory GICombinerOptionCategory( + "GlobalISel Combiner", + "Control the rules which are enabled. These options all take a comma " + "separated list of rules to disable and may be specified by number " + "or number range (e.g. 1-10)." +#ifndef NDEBUG + " They may also be specified by name." +#endif +); +} // end namespace llvm + +namespace { +/// This class acts as the glue the joins the CombinerHelper to the overall +/// Combine algorithm. The CombinerHelper is intended to report the +/// modifications it makes to the MIR to the GISelChangeObserver and the +/// observer subclass will act on these events. In this case, instruction +/// erasure will cancel any future visits to the erased instruction and +/// instruction creation will schedule that instruction for a future visit. +/// Other Combiner implementations may require more complex behaviour from +/// their GISelChangeObserver subclass. +class WorkListMaintainer : public GISelChangeObserver { + using WorkListTy = GISelWorkList<512>; + WorkListTy &WorkList; + /// The instructions that have been created but we want to report once they + /// have their operands. This is only maintained if debug output is requested. 
+ SmallPtrSet<const MachineInstr *, 4> CreatedInstrs; + +public: + WorkListMaintainer(WorkListTy &WorkList) + : GISelChangeObserver(), WorkList(WorkList) {} + virtual ~WorkListMaintainer() { + } + + void erasingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << "Erasing: " << MI << "\n"); + WorkList.remove(&MI); + } + void createdInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << "Creating: " << MI << "\n"); + WorkList.insert(&MI); + LLVM_DEBUG(CreatedInstrs.insert(&MI)); + } + void changingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << "Changing: " << MI << "\n"); + WorkList.insert(&MI); + } + void changedInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << "Changed: " << MI << "\n"); + WorkList.insert(&MI); + } + + void reportFullyCreatedInstrs() { + LLVM_DEBUG(for (const auto *MI + : CreatedInstrs) { + dbgs() << "Created: "; + MI->print(dbgs()); + }); + LLVM_DEBUG(CreatedInstrs.clear()); + } +}; +} + +Combiner::Combiner(CombinerInfo &Info, const TargetPassConfig *TPC) + : CInfo(Info), TPC(TPC) { + (void)this->TPC; // FIXME: Remove when used. +} + +bool Combiner::combineMachineInstrs(MachineFunction &MF, + GISelCSEInfo *CSEInfo) { + // If the ISel pipeline failed, do not bother running this pass. + // FIXME: Should this be here or in individual combiner passes. + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + + Builder = + CSEInfo ? std::make_unique<CSEMIRBuilder>() : std::make_unique<MachineIRBuilder>(); + MRI = &MF.getRegInfo(); + Builder->setMF(MF); + if (CSEInfo) + Builder->setCSEInfo(CSEInfo); + + LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n'); + + MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); + + bool MFChanged = false; + bool Changed; + MachineIRBuilder &B = *Builder.get(); + + do { + // Collect all instructions. Do a post order traversal for basic blocks and + // insert with list bottom up, so while we pop_back_val, we'll traverse top + // down RPOT. + Changed = false; + GISelWorkList<512> WorkList; + WorkListMaintainer Observer(WorkList); + GISelObserverWrapper WrapperObserver(&Observer); + if (CSEInfo) + WrapperObserver.addObserver(CSEInfo); + RAIIDelegateInstaller DelInstall(MF, &WrapperObserver); + for (MachineBasicBlock *MBB : post_order(&MF)) { + for (auto MII = MBB->rbegin(), MIE = MBB->rend(); MII != MIE;) { + MachineInstr *CurMI = &*MII; + ++MII; + // Erase dead insts before even adding to the list. + if (isTriviallyDead(*CurMI, *MRI)) { + LLVM_DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n"); + CurMI->eraseFromParentAndMarkDBGValuesForRemoval(); + continue; + } + WorkList.deferred_insert(CurMI); + } + } + WorkList.finalize(); + // Main Loop. Process the instructions here. + while (!WorkList.empty()) { + MachineInstr *CurrInst = WorkList.pop_back_val(); + LLVM_DEBUG(dbgs() << "\nTry combining " << *CurrInst;); + Changed |= CInfo.combine(WrapperObserver, *CurrInst, B); + Observer.reportFullyCreatedInstrs(); + } + MFChanged |= Changed; + } while (Changed); + assert(!CSEInfo || (!errorToBool(CSEInfo->verify()) && "CSEInfo is not consistent. 
Likely missing calls to " "observer on mutations")); - return MFChanged; -} + return MFChanged; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 3dfe1658e9..a9353bdfb7 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -1,49 +1,49 @@ -//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" -#include "llvm/CodeGen/GlobalISel/Combiner.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" -#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineDominators.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineInstr.h" +//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" +#include "llvm/CodeGen/GlobalISel/Combiner.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineMemOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetInstrInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Target/TargetMachine.h" - -#define DEBUG_TYPE "gi-combiner" - -using namespace llvm; -using namespace MIPatternMatch; - -// Option to allow testing of the combiner while no targets know about indexed -// addressing. 
-static cl::opt<bool> - ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), - cl::desc("Force all indexed operations to be " - "legal for the GlobalISel combiner")); - -CombinerHelper::CombinerHelper(GISelChangeObserver &Observer, - MachineIRBuilder &B, GISelKnownBits *KB, - MachineDominatorTree *MDT, - const LegalizerInfo *LI) - : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), - KB(KB), MDT(MDT), LI(LI) { - (void)this->KB; -} - +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +#define DEBUG_TYPE "gi-combiner" + +using namespace llvm; +using namespace MIPatternMatch; + +// Option to allow testing of the combiner while no targets know about indexed +// addressing. +static cl::opt<bool> + ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), + cl::desc("Force all indexed operations to be " + "legal for the GlobalISel combiner")); + +CombinerHelper::CombinerHelper(GISelChangeObserver &Observer, + MachineIRBuilder &B, GISelKnownBits *KB, + MachineDominatorTree *MDT, + const LegalizerInfo *LI) + : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), + KB(KB), MDT(MDT), LI(LI) { + (void)this->KB; +} + const TargetLowering &CombinerHelper::getTargetLowering() const { return *Builder.getMF().getSubtarget().getTargetLowering(); } @@ -113,517 +113,517 @@ bool CombinerHelper::isLegalOrBeforeLegalizer( return !LI || LI->getAction(Query).Action == LegalizeActions::Legal; } -void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, - Register ToReg) const { - Observer.changingAllUsesOfReg(MRI, FromReg); - - if (MRI.constrainRegAttrs(ToReg, FromReg)) - MRI.replaceRegWith(FromReg, ToReg); - else - Builder.buildCopy(ToReg, FromReg); - - Observer.finishedChangingAllUsesOfReg(); -} - -void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI, - MachineOperand &FromRegOp, - Register ToReg) const { - assert(FromRegOp.getParent() && "Expected an operand in an MI"); - Observer.changingInstr(*FromRegOp.getParent()); - - FromRegOp.setReg(ToReg); - - Observer.changedInstr(*FromRegOp.getParent()); -} - -bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { - if (matchCombineCopy(MI)) { - applyCombineCopy(MI); - return true; - } - return false; -} -bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { - if (MI.getOpcode() != TargetOpcode::COPY) - return false; - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - return canReplaceReg(DstReg, SrcReg, MRI); -} -void CombinerHelper::applyCombineCopy(MachineInstr &MI) { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - MI.eraseFromParent(); - replaceRegWith(MRI, DstReg, SrcReg); -} - -bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) { - bool IsUndef = false; - SmallVector<Register, 4> Ops; - if (matchCombineConcatVectors(MI, IsUndef, Ops)) { - applyCombineConcatVectors(MI, IsUndef, Ops); - return true; - } - return false; -} - -bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef, - SmallVectorImpl<Register> &Ops) { - assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && - "Invalid instruction"); - IsUndef = true; - MachineInstr *Undef = nullptr; - - // Walk over all the operands of concat vectors and check if they are - // build_vector themselves or undef. 
- // Then collect their operands in Ops. - for (const MachineOperand &MO : MI.uses()) { - Register Reg = MO.getReg(); - MachineInstr *Def = MRI.getVRegDef(Reg); - assert(Def && "Operand not defined"); - switch (Def->getOpcode()) { - case TargetOpcode::G_BUILD_VECTOR: - IsUndef = false; - // Remember the operands of the build_vector to fold - // them into the yet-to-build flattened concat vectors. - for (const MachineOperand &BuildVecMO : Def->uses()) - Ops.push_back(BuildVecMO.getReg()); - break; - case TargetOpcode::G_IMPLICIT_DEF: { - LLT OpType = MRI.getType(Reg); - // Keep one undef value for all the undef operands. - if (!Undef) { - Builder.setInsertPt(*MI.getParent(), MI); - Undef = Builder.buildUndef(OpType.getScalarType()); - } - assert(MRI.getType(Undef->getOperand(0).getReg()) == - OpType.getScalarType() && - "All undefs should have the same type"); - // Break the undef vector in as many scalar elements as needed - // for the flattening. - for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements(); - EltIdx != EltEnd; ++EltIdx) - Ops.push_back(Undef->getOperand(0).getReg()); - break; - } - default: - return false; - } - } - return true; -} -void CombinerHelper::applyCombineConcatVectors( - MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) { - // We determined that the concat_vectors can be flatten. - // Generate the flattened build_vector. - Register DstReg = MI.getOperand(0).getReg(); - Builder.setInsertPt(*MI.getParent(), MI); - Register NewDstReg = MRI.cloneVirtualRegister(DstReg); - - // Note: IsUndef is sort of redundant. We could have determine it by - // checking that at all Ops are undef. Alternatively, we could have - // generate a build_vector of undefs and rely on another combine to - // clean that up. For now, given we already gather this information - // in tryCombineConcatVectors, just save compile time and issue the - // right thing. - if (IsUndef) - Builder.buildUndef(NewDstReg); - else - Builder.buildBuildVector(NewDstReg, Ops); - MI.eraseFromParent(); - replaceRegWith(MRI, DstReg, NewDstReg); -} - -bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) { - SmallVector<Register, 4> Ops; - if (matchCombineShuffleVector(MI, Ops)) { - applyCombineShuffleVector(MI, Ops); - return true; - } - return false; -} - -bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI, - SmallVectorImpl<Register> &Ops) { - assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && - "Invalid instruction kind"); - LLT DstType = MRI.getType(MI.getOperand(0).getReg()); - Register Src1 = MI.getOperand(1).getReg(); - LLT SrcType = MRI.getType(Src1); - // As bizarre as it may look, shuffle vector can actually produce - // scalar! This is because at the IR level a <1 x ty> shuffle - // vector is perfectly valid. - unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1; - unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1; - - // If the resulting vector is smaller than the size of the source - // vectors being concatenated, we won't be able to replace the - // shuffle vector into a concat_vectors. - // - // Note: We may still be able to produce a concat_vectors fed by - // extract_vector_elt and so on. It is less clear that would - // be better though, so don't bother for now. - // - // If the destination is a scalar, the size of the sources doesn't - // matter. we will lower the shuffle to a plain copy. This will - // work only if the source and destination have the same size. But - // that's covered by the next condition. 
- // - // TODO: If the size between the source and destination don't match - // we could still emit an extract vector element in that case. - if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1) - return false; - - // Check that the shuffle mask can be broken evenly between the - // different sources. - if (DstNumElts % SrcNumElts != 0) - return false; - - // Mask length is a multiple of the source vector length. - // Check if the shuffle is some kind of concatenation of the input - // vectors. - unsigned NumConcat = DstNumElts / SrcNumElts; - SmallVector<int, 8> ConcatSrcs(NumConcat, -1); - ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); - for (unsigned i = 0; i != DstNumElts; ++i) { - int Idx = Mask[i]; - // Undef value. - if (Idx < 0) - continue; - // Ensure the indices in each SrcType sized piece are sequential and that - // the same source is used for the whole piece. - if ((Idx % SrcNumElts != (i % SrcNumElts)) || - (ConcatSrcs[i / SrcNumElts] >= 0 && - ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) - return false; - // Remember which source this index came from. - ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; - } - - // The shuffle is concatenating multiple vectors together. - // Collect the different operands for that. - Register UndefReg; - Register Src2 = MI.getOperand(2).getReg(); - for (auto Src : ConcatSrcs) { - if (Src < 0) { - if (!UndefReg) { - Builder.setInsertPt(*MI.getParent(), MI); - UndefReg = Builder.buildUndef(SrcType).getReg(0); - } - Ops.push_back(UndefReg); - } else if (Src == 0) - Ops.push_back(Src1); - else - Ops.push_back(Src2); - } - return true; -} - -void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI, - const ArrayRef<Register> Ops) { - Register DstReg = MI.getOperand(0).getReg(); - Builder.setInsertPt(*MI.getParent(), MI); - Register NewDstReg = MRI.cloneVirtualRegister(DstReg); - - if (Ops.size() == 1) - Builder.buildCopy(NewDstReg, Ops[0]); - else - Builder.buildMerge(NewDstReg, Ops); - - MI.eraseFromParent(); - replaceRegWith(MRI, DstReg, NewDstReg); -} - -namespace { - -/// Select a preference between two uses. CurrentUse is the current preference -/// while *ForCandidate is attributes of the candidate under consideration. -PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse, - const LLT TyForCandidate, - unsigned OpcodeForCandidate, - MachineInstr *MIForCandidate) { - if (!CurrentUse.Ty.isValid()) { - if (CurrentUse.ExtendOpcode == OpcodeForCandidate || - CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT) - return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; - return CurrentUse; - } - - // We permit the extend to hoist through basic blocks but this is only - // sensible if the target has extending loads. If you end up lowering back - // into a load and extend during the legalizer then the end result is - // hoisting the extend up to the load. - - // Prefer defined extensions to undefined extensions as these are more - // likely to reduce the number of instructions. - if (OpcodeForCandidate == TargetOpcode::G_ANYEXT && - CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT) - return CurrentUse; - else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT && - OpcodeForCandidate != TargetOpcode::G_ANYEXT) - return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; - - // Prefer sign extensions to zero extensions as sign-extensions tend to be - // more expensive. 
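// The whole preference order restated as a standalone sketch: a defined
// extend beats G_ANYEXT, sign beats zero at equal width, and otherwise the
// widest candidate wins because G_TRUNC is usually free. Ext and Cand are
// hypothetical simplifications of the PreferredTuple state, illustration only.
enum class Ext { Any, Zext, Sext };

struct Cand {
  unsigned Bits;
  Ext Kind;
};

Cand preferUse(Cand Current, Cand Candidate) {
  if (Candidate.Kind == Ext::Any && Current.Kind != Ext::Any)
    return Current; // defined extends beat any-extends
  if (Current.Kind == Ext::Any && Candidate.Kind != Ext::Any)
    return Candidate;
  if (Current.Bits == Candidate.Bits) {
    if (Current.Kind == Ext::Sext && Candidate.Kind == Ext::Zext)
      return Current; // at equal width, prefer the sign extension
    if (Current.Kind == Ext::Zext && Candidate.Kind == Ext::Sext)
      return Candidate;
  }
  return Candidate.Bits > Current.Bits ? Candidate : Current;
}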
- if (CurrentUse.Ty == TyForCandidate) { - if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT && - OpcodeForCandidate == TargetOpcode::G_ZEXT) - return CurrentUse; - else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT && - OpcodeForCandidate == TargetOpcode::G_SEXT) - return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; - } - - // This is potentially target specific. We've chosen the largest type - // because G_TRUNC is usually free. One potential catch with this is that - // some targets have a reduced number of larger registers than smaller - // registers and this choice potentially increases the live-range for the - // larger value. - if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) { - return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; - } - return CurrentUse; -} - -/// Find a suitable place to insert some instructions and insert them. This -/// function accounts for special cases like inserting before a PHI node. -/// The current strategy for inserting before PHI's is to duplicate the -/// instructions for each predecessor. However, while that's ok for G_TRUNC -/// on most targets since it generally requires no code, other targets/cases may -/// want to try harder to find a dominating block. -static void InsertInsnsWithoutSideEffectsBeforeUse( - MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO, - std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator, - MachineOperand &UseMO)> - Inserter) { - MachineInstr &UseMI = *UseMO.getParent(); - - MachineBasicBlock *InsertBB = UseMI.getParent(); - - // If the use is a PHI then we want the predecessor block instead. - if (UseMI.isPHI()) { - MachineOperand *PredBB = std::next(&UseMO); - InsertBB = PredBB->getMBB(); - } - - // If the block is the same block as the def then we want to insert just after - // the def instead of at the start of the block. - if (InsertBB == DefMI.getParent()) { - MachineBasicBlock::iterator InsertPt = &DefMI; - Inserter(InsertBB, std::next(InsertPt), UseMO); - return; - } - - // Otherwise we want the start of the BB - Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO); -} -} // end anonymous namespace - -bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) { - PreferredTuple Preferred; - if (matchCombineExtendingLoads(MI, Preferred)) { - applyCombineExtendingLoads(MI, Preferred); - return true; - } - return false; -} - -bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI, - PreferredTuple &Preferred) { - // We match the loads and follow the uses to the extend instead of matching - // the extends and following the def to the load. This is because the load - // must remain in the same position for correctness (unless we also add code - // to find a safe place to sink it) whereas the extend is freely movable. - // It also prevents us from duplicating the load for the volatile case or just - // for performance. - - if (MI.getOpcode() != TargetOpcode::G_LOAD && - MI.getOpcode() != TargetOpcode::G_SEXTLOAD && - MI.getOpcode() != TargetOpcode::G_ZEXTLOAD) - return false; - - auto &LoadValue = MI.getOperand(0); - assert(LoadValue.isReg() && "Result wasn't a register?"); - - LLT LoadValueTy = MRI.getType(LoadValue.getReg()); - if (!LoadValueTy.isScalar()) - return false; - - // Most architectures are going to legalize <s8 loads into at least a 1 byte - // load, and the MMOs can only describe memory accesses in multiples of bytes. 
- // If we try to perform extload combining on those, we can end up with - // %a(s8) = extload %ptr (load 1 byte from %ptr) - // ... which is an illegal extload instruction. - if (LoadValueTy.getSizeInBits() < 8) - return false; - - // For non power-of-2 types, they will very likely be legalized into multiple - // loads. Don't bother trying to match them into extending loads. - if (!isPowerOf2_32(LoadValueTy.getSizeInBits())) - return false; - - // Find the preferred type aside from the any-extends (unless it's the only - // one) and non-extending ops. We'll emit an extending load to that type and - // and emit a variant of (extend (trunc X)) for the others according to the - // relative type sizes. At the same time, pick an extend to use based on the - // extend involved in the chosen type. - unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD - ? TargetOpcode::G_ANYEXT - : MI.getOpcode() == TargetOpcode::G_SEXTLOAD - ? TargetOpcode::G_SEXT - : TargetOpcode::G_ZEXT; - Preferred = {LLT(), PreferredOpcode, nullptr}; - for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) { - if (UseMI.getOpcode() == TargetOpcode::G_SEXT || - UseMI.getOpcode() == TargetOpcode::G_ZEXT || - (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) { - // Check for legality. - if (LI) { - LegalityQuery::MemDesc MMDesc; - const auto &MMO = **MI.memoperands_begin(); - MMDesc.SizeInBits = MMO.getSizeInBits(); - MMDesc.AlignInBits = MMO.getAlign().value() * 8; - MMDesc.Ordering = MMO.getOrdering(); - LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg()); - LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); - if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action != - LegalizeActions::Legal) - continue; - } - Preferred = ChoosePreferredUse(Preferred, - MRI.getType(UseMI.getOperand(0).getReg()), - UseMI.getOpcode(), &UseMI); - } - } - - // There were no extends - if (!Preferred.MI) - return false; - // It should be impossible to chose an extend without selecting a different - // type since by definition the result of an extend is larger. - assert(Preferred.Ty != LoadValueTy && "Extending to same type?"); - - LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI); - return true; -} - -void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI, - PreferredTuple &Preferred) { - // Rewrite the load to the chosen extending load. - Register ChosenDstReg = Preferred.MI->getOperand(0).getReg(); - - // Inserter to insert a truncate back to the original type at a given point - // with some basic CSE to limit truncate duplication to one per BB. - DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns; - auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB, - MachineBasicBlock::iterator InsertBefore, - MachineOperand &UseMO) { - MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB); - if (PreviouslyEmitted) { - Observer.changingInstr(*UseMO.getParent()); - UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg()); - Observer.changedInstr(*UseMO.getParent()); - return; - } - - Builder.setInsertPt(*InsertIntoBB, InsertBefore); - Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg()); - MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg); - EmittedInsns[InsertIntoBB] = NewMI; - replaceRegOpWith(MRI, UseMO, NewDstReg); - }; - - Observer.changingInstr(MI); - MI.setDesc( - Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT - ? TargetOpcode::G_SEXTLOAD - : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT - ? 
TargetOpcode::G_ZEXTLOAD - : TargetOpcode::G_LOAD)); - - // Rewrite all the uses to fix up the types. - auto &LoadValue = MI.getOperand(0); - SmallVector<MachineOperand *, 4> Uses; - for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) - Uses.push_back(&UseMO); - - for (auto *UseMO : Uses) { - MachineInstr *UseMI = UseMO->getParent(); - - // If the extend is compatible with the preferred extend then we should fix - // up the type and extend so that it uses the preferred use. - if (UseMI->getOpcode() == Preferred.ExtendOpcode || - UseMI->getOpcode() == TargetOpcode::G_ANYEXT) { - Register UseDstReg = UseMI->getOperand(0).getReg(); - MachineOperand &UseSrcMO = UseMI->getOperand(1); - const LLT UseDstTy = MRI.getType(UseDstReg); - if (UseDstReg != ChosenDstReg) { - if (Preferred.Ty == UseDstTy) { - // If the use has the same type as the preferred use, then merge - // the vregs and erase the extend. For example: - // %1:_(s8) = G_LOAD ... - // %2:_(s32) = G_SEXT %1(s8) - // %3:_(s32) = G_ANYEXT %1(s8) - // ... = ... %3(s32) - // rewrites to: - // %2:_(s32) = G_SEXTLOAD ... - // ... = ... %2(s32) - replaceRegWith(MRI, UseDstReg, ChosenDstReg); - Observer.erasingInstr(*UseMO->getParent()); - UseMO->getParent()->eraseFromParent(); - } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) { - // If the preferred size is smaller, then keep the extend but extend - // from the result of the extending load. For example: - // %1:_(s8) = G_LOAD ... - // %2:_(s32) = G_SEXT %1(s8) - // %3:_(s64) = G_ANYEXT %1(s8) - // ... = ... %3(s64) - /// rewrites to: - // %2:_(s32) = G_SEXTLOAD ... - // %3:_(s64) = G_ANYEXT %2:_(s32) - // ... = ... %3(s64) - replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg); - } else { - // If the preferred size is large, then insert a truncate. For - // example: - // %1:_(s8) = G_LOAD ... - // %2:_(s64) = G_SEXT %1(s8) - // %3:_(s32) = G_ZEXT %1(s8) - // ... = ... %3(s32) - /// rewrites to: - // %2:_(s64) = G_SEXTLOAD ... - // %4:_(s8) = G_TRUNC %2:_(s32) - // %3:_(s64) = G_ZEXT %2:_(s8) - // ... = ... %3(s64) - InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, - InsertTruncAt); - } - continue; - } - // The use is (one of) the uses of the preferred use we chose earlier. - // We're going to update the load to def this value later so just erase - // the old extend. - Observer.erasingInstr(*UseMO->getParent()); - UseMO->getParent()->eraseFromParent(); - continue; - } - - // The use isn't an extend. Truncate back to the type we originally loaded. - // This is free on many targets. 
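// The three use-rewrite cases above reduce to a width comparison between the
// preferred type and the use's destination type; a toy classifier with
// hypothetical names, for illustration only:
enum class Rewrite { MergeVRegs, ExtendFromLoad, TruncateBack };

Rewrite classifyUse(unsigned PreferredBits, unsigned UseBits) {
  if (PreferredBits == UseBits)
    return Rewrite::MergeVRegs;    // erase the now-redundant extend
  if (PreferredBits < UseBits)
    return Rewrite::ExtendFromLoad; // keep the extend, feed it the ext-load
  return Rewrite::TruncateBack;     // insert a trunc back to the use's type
}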
- InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt); - } - - MI.getOperand(0).setReg(ChosenDstReg); - Observer.changedInstr(MI); -} - -bool CombinerHelper::isPredecessor(const MachineInstr &DefMI, - const MachineInstr &UseMI) { - assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && - "shouldn't consider debug uses"); - assert(DefMI.getParent() == UseMI.getParent()); - if (&DefMI == &UseMI) - return false; +void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, + Register ToReg) const { + Observer.changingAllUsesOfReg(MRI, FromReg); + + if (MRI.constrainRegAttrs(ToReg, FromReg)) + MRI.replaceRegWith(FromReg, ToReg); + else + Builder.buildCopy(ToReg, FromReg); + + Observer.finishedChangingAllUsesOfReg(); +} + +void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI, + MachineOperand &FromRegOp, + Register ToReg) const { + assert(FromRegOp.getParent() && "Expected an operand in an MI"); + Observer.changingInstr(*FromRegOp.getParent()); + + FromRegOp.setReg(ToReg); + + Observer.changedInstr(*FromRegOp.getParent()); +} + +bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { + if (matchCombineCopy(MI)) { + applyCombineCopy(MI); + return true; + } + return false; +} +bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { + if (MI.getOpcode() != TargetOpcode::COPY) + return false; + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + return canReplaceReg(DstReg, SrcReg, MRI); +} +void CombinerHelper::applyCombineCopy(MachineInstr &MI) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + MI.eraseFromParent(); + replaceRegWith(MRI, DstReg, SrcReg); +} + +bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) { + bool IsUndef = false; + SmallVector<Register, 4> Ops; + if (matchCombineConcatVectors(MI, IsUndef, Ops)) { + applyCombineConcatVectors(MI, IsUndef, Ops); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef, + SmallVectorImpl<Register> &Ops) { + assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && + "Invalid instruction"); + IsUndef = true; + MachineInstr *Undef = nullptr; + + // Walk over all the operands of concat vectors and check if they are + // build_vector themselves or undef. + // Then collect their operands in Ops. + for (const MachineOperand &MO : MI.uses()) { + Register Reg = MO.getReg(); + MachineInstr *Def = MRI.getVRegDef(Reg); + assert(Def && "Operand not defined"); + switch (Def->getOpcode()) { + case TargetOpcode::G_BUILD_VECTOR: + IsUndef = false; + // Remember the operands of the build_vector to fold + // them into the yet-to-build flattened concat vectors. + for (const MachineOperand &BuildVecMO : Def->uses()) + Ops.push_back(BuildVecMO.getReg()); + break; + case TargetOpcode::G_IMPLICIT_DEF: { + LLT OpType = MRI.getType(Reg); + // Keep one undef value for all the undef operands. + if (!Undef) { + Builder.setInsertPt(*MI.getParent(), MI); + Undef = Builder.buildUndef(OpType.getScalarType()); + } + assert(MRI.getType(Undef->getOperand(0).getReg()) == + OpType.getScalarType() && + "All undefs should have the same type"); + // Break the undef vector in as many scalar elements as needed + // for the flattening. 
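// A standalone model of the flattening step for the concat-of-build_vector
// combine: splice each source's scalar operands, or N copies of one shared
// undef scalar, into a single operand list for the replacement build_vector.
// Reg and Src are hypothetical simplifications, for illustration only.
#include <vector>

using Reg = int;

struct Src {
  bool IsUndef;
  std::vector<Reg> Elts; // build_vector operands when !IsUndef
  unsigned NumElts;      // element count to expand when IsUndef
};

std::vector<Reg> flattenConcat(const std::vector<Src> &Srcs, Reg UndefScalar) {
  std::vector<Reg> Ops;
  for (const Src &S : Srcs) {
    if (S.IsUndef)
      Ops.insert(Ops.end(), S.NumElts, UndefScalar); // one scalar per element
    else
      Ops.insert(Ops.end(), S.Elts.begin(), S.Elts.end());
  }
  return Ops;
}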
+ for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements(); + EltIdx != EltEnd; ++EltIdx) + Ops.push_back(Undef->getOperand(0).getReg()); + break; + } + default: + return false; + } + } + return true; +} +void CombinerHelper::applyCombineConcatVectors( + MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) { + // We determined that the concat_vectors can be flatten. + // Generate the flattened build_vector. + Register DstReg = MI.getOperand(0).getReg(); + Builder.setInsertPt(*MI.getParent(), MI); + Register NewDstReg = MRI.cloneVirtualRegister(DstReg); + + // Note: IsUndef is sort of redundant. We could have determine it by + // checking that at all Ops are undef. Alternatively, we could have + // generate a build_vector of undefs and rely on another combine to + // clean that up. For now, given we already gather this information + // in tryCombineConcatVectors, just save compile time and issue the + // right thing. + if (IsUndef) + Builder.buildUndef(NewDstReg); + else + Builder.buildBuildVector(NewDstReg, Ops); + MI.eraseFromParent(); + replaceRegWith(MRI, DstReg, NewDstReg); +} + +bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) { + SmallVector<Register, 4> Ops; + if (matchCombineShuffleVector(MI, Ops)) { + applyCombineShuffleVector(MI, Ops); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI, + SmallVectorImpl<Register> &Ops) { + assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && + "Invalid instruction kind"); + LLT DstType = MRI.getType(MI.getOperand(0).getReg()); + Register Src1 = MI.getOperand(1).getReg(); + LLT SrcType = MRI.getType(Src1); + // As bizarre as it may look, shuffle vector can actually produce + // scalar! This is because at the IR level a <1 x ty> shuffle + // vector is perfectly valid. + unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1; + unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1; + + // If the resulting vector is smaller than the size of the source + // vectors being concatenated, we won't be able to replace the + // shuffle vector into a concat_vectors. + // + // Note: We may still be able to produce a concat_vectors fed by + // extract_vector_elt and so on. It is less clear that would + // be better though, so don't bother for now. + // + // If the destination is a scalar, the size of the sources doesn't + // matter. we will lower the shuffle to a plain copy. This will + // work only if the source and destination have the same size. But + // that's covered by the next condition. + // + // TODO: If the size between the source and destination don't match + // we could still emit an extract vector element in that case. + if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1) + return false; + + // Check that the shuffle mask can be broken evenly between the + // different sources. + if (DstNumElts % SrcNumElts != 0) + return false; + + // Mask length is a multiple of the source vector length. + // Check if the shuffle is some kind of concatenation of the input + // vectors. + unsigned NumConcat = DstNumElts / SrcNumElts; + SmallVector<int, 8> ConcatSrcs(NumConcat, -1); + ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); + for (unsigned i = 0; i != DstNumElts; ++i) { + int Idx = Mask[i]; + // Undef value. + if (Idx < 0) + continue; + // Ensure the indices in each SrcType sized piece are sequential and that + // the same source is used for the whole piece. 
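// The mask walk below as a standalone sketch: the shuffle is a concatenation
// iff every SrcNumElts-sized slice of the mask is a sequential run drawn from
// a single source vector, with negative entries (undef) matching anything.
// Returns one source index per slice, or an empty vector on failure;
// illustration only.
#include <cstddef>
#include <vector>

std::vector<int> matchConcatMask(const std::vector<int> &Mask,
                                 unsigned SrcNumElts) {
  if (SrcNumElts == 0 || Mask.empty() || Mask.size() % SrcNumElts != 0)
    return {};
  std::vector<int> ConcatSrcs(Mask.size() / SrcNumElts, -1);
  for (std::size_t I = 0; I != Mask.size(); ++I) {
    int Idx = Mask[I];
    if (Idx < 0)
      continue; // undef lane: compatible with any source
    std::size_t Slice = I / SrcNumElts;
    if (unsigned(Idx) % SrcNumElts != I % SrcNumElts ||
        (ConcatSrcs[Slice] >= 0 && ConcatSrcs[Slice] != Idx / int(SrcNumElts)))
      return {};
    ConcatSrcs[Slice] = Idx / int(SrcNumElts); // remember this slice's source
  }
  return ConcatSrcs;
}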
+ if ((Idx % SrcNumElts != (i % SrcNumElts)) || + (ConcatSrcs[i / SrcNumElts] >= 0 && + ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) + return false; + // Remember which source this index came from. + ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; + } + + // The shuffle is concatenating multiple vectors together. + // Collect the different operands for that. + Register UndefReg; + Register Src2 = MI.getOperand(2).getReg(); + for (auto Src : ConcatSrcs) { + if (Src < 0) { + if (!UndefReg) { + Builder.setInsertPt(*MI.getParent(), MI); + UndefReg = Builder.buildUndef(SrcType).getReg(0); + } + Ops.push_back(UndefReg); + } else if (Src == 0) + Ops.push_back(Src1); + else + Ops.push_back(Src2); + } + return true; +} + +void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI, + const ArrayRef<Register> Ops) { + Register DstReg = MI.getOperand(0).getReg(); + Builder.setInsertPt(*MI.getParent(), MI); + Register NewDstReg = MRI.cloneVirtualRegister(DstReg); + + if (Ops.size() == 1) + Builder.buildCopy(NewDstReg, Ops[0]); + else + Builder.buildMerge(NewDstReg, Ops); + + MI.eraseFromParent(); + replaceRegWith(MRI, DstReg, NewDstReg); +} + +namespace { + +/// Select a preference between two uses. CurrentUse is the current preference +/// while *ForCandidate is attributes of the candidate under consideration. +PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse, + const LLT TyForCandidate, + unsigned OpcodeForCandidate, + MachineInstr *MIForCandidate) { + if (!CurrentUse.Ty.isValid()) { + if (CurrentUse.ExtendOpcode == OpcodeForCandidate || + CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + return CurrentUse; + } + + // We permit the extend to hoist through basic blocks but this is only + // sensible if the target has extending loads. If you end up lowering back + // into a load and extend during the legalizer then the end result is + // hoisting the extend up to the load. + + // Prefer defined extensions to undefined extensions as these are more + // likely to reduce the number of instructions. + if (OpcodeForCandidate == TargetOpcode::G_ANYEXT && + CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT) + return CurrentUse; + else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT && + OpcodeForCandidate != TargetOpcode::G_ANYEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + + // Prefer sign extensions to zero extensions as sign-extensions tend to be + // more expensive. + if (CurrentUse.Ty == TyForCandidate) { + if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT && + OpcodeForCandidate == TargetOpcode::G_ZEXT) + return CurrentUse; + else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT && + OpcodeForCandidate == TargetOpcode::G_SEXT) + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + } + + // This is potentially target specific. We've chosen the largest type + // because G_TRUNC is usually free. One potential catch with this is that + // some targets have a reduced number of larger registers than smaller + // registers and this choice potentially increases the live-range for the + // larger value. + if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) { + return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; + } + return CurrentUse; +} + +/// Find a suitable place to insert some instructions and insert them. This +/// function accounts for special cases like inserting before a PHI node. 
+/// The current strategy for inserting before PHI's is to duplicate the +/// instructions for each predecessor. However, while that's ok for G_TRUNC +/// on most targets since it generally requires no code, other targets/cases may +/// want to try harder to find a dominating block. +static void InsertInsnsWithoutSideEffectsBeforeUse( + MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO, + std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator, + MachineOperand &UseMO)> + Inserter) { + MachineInstr &UseMI = *UseMO.getParent(); + + MachineBasicBlock *InsertBB = UseMI.getParent(); + + // If the use is a PHI then we want the predecessor block instead. + if (UseMI.isPHI()) { + MachineOperand *PredBB = std::next(&UseMO); + InsertBB = PredBB->getMBB(); + } + + // If the block is the same block as the def then we want to insert just after + // the def instead of at the start of the block. + if (InsertBB == DefMI.getParent()) { + MachineBasicBlock::iterator InsertPt = &DefMI; + Inserter(InsertBB, std::next(InsertPt), UseMO); + return; + } + + // Otherwise we want the start of the BB + Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO); +} +} // end anonymous namespace + +bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) { + PreferredTuple Preferred; + if (matchCombineExtendingLoads(MI, Preferred)) { + applyCombineExtendingLoads(MI, Preferred); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI, + PreferredTuple &Preferred) { + // We match the loads and follow the uses to the extend instead of matching + // the extends and following the def to the load. This is because the load + // must remain in the same position for correctness (unless we also add code + // to find a safe place to sink it) whereas the extend is freely movable. + // It also prevents us from duplicating the load for the volatile case or just + // for performance. + + if (MI.getOpcode() != TargetOpcode::G_LOAD && + MI.getOpcode() != TargetOpcode::G_SEXTLOAD && + MI.getOpcode() != TargetOpcode::G_ZEXTLOAD) + return false; + + auto &LoadValue = MI.getOperand(0); + assert(LoadValue.isReg() && "Result wasn't a register?"); + + LLT LoadValueTy = MRI.getType(LoadValue.getReg()); + if (!LoadValueTy.isScalar()) + return false; + + // Most architectures are going to legalize <s8 loads into at least a 1 byte + // load, and the MMOs can only describe memory accesses in multiples of bytes. + // If we try to perform extload combining on those, we can end up with + // %a(s8) = extload %ptr (load 1 byte from %ptr) + // ... which is an illegal extload instruction. + if (LoadValueTy.getSizeInBits() < 8) + return false; + + // For non power-of-2 types, they will very likely be legalized into multiple + // loads. Don't bother trying to match them into extending loads. + if (!isPowerOf2_32(LoadValueTy.getSizeInBits())) + return false; + + // Find the preferred type aside from the any-extends (unless it's the only + // one) and non-extending ops. We'll emit an extending load to that type and + // and emit a variant of (extend (trunc X)) for the others according to the + // relative type sizes. At the same time, pick an extend to use based on the + // extend involved in the chosen type. + unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD + ? TargetOpcode::G_ANYEXT + : MI.getOpcode() == TargetOpcode::G_SEXTLOAD + ? 
TargetOpcode::G_SEXT + : TargetOpcode::G_ZEXT; + Preferred = {LLT(), PreferredOpcode, nullptr}; + for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) { + if (UseMI.getOpcode() == TargetOpcode::G_SEXT || + UseMI.getOpcode() == TargetOpcode::G_ZEXT || + (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) { + // Check for legality. + if (LI) { + LegalityQuery::MemDesc MMDesc; + const auto &MMO = **MI.memoperands_begin(); + MMDesc.SizeInBits = MMO.getSizeInBits(); + MMDesc.AlignInBits = MMO.getAlign().value() * 8; + MMDesc.Ordering = MMO.getOrdering(); + LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg()); + LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); + if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action != + LegalizeActions::Legal) + continue; + } + Preferred = ChoosePreferredUse(Preferred, + MRI.getType(UseMI.getOperand(0).getReg()), + UseMI.getOpcode(), &UseMI); + } + } + + // There were no extends + if (!Preferred.MI) + return false; + // It should be impossible to chose an extend without selecting a different + // type since by definition the result of an extend is larger. + assert(Preferred.Ty != LoadValueTy && "Extending to same type?"); + + LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI); + return true; +} + +void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI, + PreferredTuple &Preferred) { + // Rewrite the load to the chosen extending load. + Register ChosenDstReg = Preferred.MI->getOperand(0).getReg(); + + // Inserter to insert a truncate back to the original type at a given point + // with some basic CSE to limit truncate duplication to one per BB. + DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns; + auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB, + MachineBasicBlock::iterator InsertBefore, + MachineOperand &UseMO) { + MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB); + if (PreviouslyEmitted) { + Observer.changingInstr(*UseMO.getParent()); + UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg()); + Observer.changedInstr(*UseMO.getParent()); + return; + } + + Builder.setInsertPt(*InsertIntoBB, InsertBefore); + Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg()); + MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg); + EmittedInsns[InsertIntoBB] = NewMI; + replaceRegOpWith(MRI, UseMO, NewDstReg); + }; + + Observer.changingInstr(MI); + MI.setDesc( + Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT + ? TargetOpcode::G_SEXTLOAD + : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT + ? TargetOpcode::G_ZEXTLOAD + : TargetOpcode::G_LOAD)); + + // Rewrite all the uses to fix up the types. + auto &LoadValue = MI.getOperand(0); + SmallVector<MachineOperand *, 4> Uses; + for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) + Uses.push_back(&UseMO); + + for (auto *UseMO : Uses) { + MachineInstr *UseMI = UseMO->getParent(); + + // If the extend is compatible with the preferred extend then we should fix + // up the type and extend so that it uses the preferred use. + if (UseMI->getOpcode() == Preferred.ExtendOpcode || + UseMI->getOpcode() == TargetOpcode::G_ANYEXT) { + Register UseDstReg = UseMI->getOperand(0).getReg(); + MachineOperand &UseSrcMO = UseMI->getOperand(1); + const LLT UseDstTy = MRI.getType(UseDstReg); + if (UseDstReg != ChosenDstReg) { + if (Preferred.Ty == UseDstTy) { + // If the use has the same type as the preferred use, then merge + // the vregs and erase the extend. For example: + // %1:_(s8) = G_LOAD ... 
+ // %2:_(s32) = G_SEXT %1(s8) + // %3:_(s32) = G_ANYEXT %1(s8) + // ... = ... %3(s32) + // rewrites to: + // %2:_(s32) = G_SEXTLOAD ... + // ... = ... %2(s32) + replaceRegWith(MRI, UseDstReg, ChosenDstReg); + Observer.erasingInstr(*UseMO->getParent()); + UseMO->getParent()->eraseFromParent(); + } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) { + // If the preferred size is smaller, then keep the extend but extend + // from the result of the extending load. For example: + // %1:_(s8) = G_LOAD ... + // %2:_(s32) = G_SEXT %1(s8) + // %3:_(s64) = G_ANYEXT %1(s8) + // ... = ... %3(s64) + /// rewrites to: + // %2:_(s32) = G_SEXTLOAD ... + // %3:_(s64) = G_ANYEXT %2:_(s32) + // ... = ... %3(s64) + replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg); + } else { + // If the preferred size is large, then insert a truncate. For + // example: + // %1:_(s8) = G_LOAD ... + // %2:_(s64) = G_SEXT %1(s8) + // %3:_(s32) = G_ZEXT %1(s8) + // ... = ... %3(s32) + /// rewrites to: + // %2:_(s64) = G_SEXTLOAD ... + // %4:_(s8) = G_TRUNC %2:_(s32) + // %3:_(s64) = G_ZEXT %2:_(s8) + // ... = ... %3(s64) + InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, + InsertTruncAt); + } + continue; + } + // The use is (one of) the uses of the preferred use we chose earlier. + // We're going to update the load to def this value later so just erase + // the old extend. + Observer.erasingInstr(*UseMO->getParent()); + UseMO->getParent()->eraseFromParent(); + continue; + } + + // The use isn't an extend. Truncate back to the type we originally loaded. + // This is free on many targets. + InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt); + } + + MI.getOperand(0).setReg(ChosenDstReg); + Observer.changedInstr(MI); +} + +bool CombinerHelper::isPredecessor(const MachineInstr &DefMI, + const MachineInstr &UseMI) { + assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && + "shouldn't consider debug uses"); + assert(DefMI.getParent() == UseMI.getParent()); + if (&DefMI == &UseMI) + return false; const MachineBasicBlock &MBB = *DefMI.getParent(); auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) { return &MI == &DefMI || &MI == &UseMI; @@ -631,23 +631,23 @@ bool CombinerHelper::isPredecessor(const MachineInstr &DefMI, if (DefOrUse == MBB.end()) llvm_unreachable("Block must contain both DefMI and UseMI!"); return &*DefOrUse == &DefMI; -} - -bool CombinerHelper::dominates(const MachineInstr &DefMI, - const MachineInstr &UseMI) { - assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && - "shouldn't consider debug uses"); - if (MDT) - return MDT->dominates(&DefMI, &UseMI); - else if (DefMI.getParent() != UseMI.getParent()) - return false; - - return isPredecessor(DefMI, UseMI); -} - +} + +bool CombinerHelper::dominates(const MachineInstr &DefMI, + const MachineInstr &UseMI) { + assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && + "shouldn't consider debug uses"); + if (MDT) + return MDT->dominates(&DefMI, &UseMI); + else if (DefMI.getParent() != UseMI.getParent()) + return false; + + return isPredecessor(DefMI, UseMI); +} + bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); - Register SrcReg = MI.getOperand(1).getReg(); + assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); + Register SrcReg = MI.getOperand(1).getReg(); Register LoadUser = SrcReg; if (MRI.getType(SrcReg).isVector()) @@ -669,16 +669,16 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) { return true; } return 
false; -} - +} + bool CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); + assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); Builder.setInstrAndDebugLoc(MI); Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg()); - MI.eraseFromParent(); - return true; -} - + MI.eraseFromParent(); + return true; +} + bool CombinerHelper::matchSextInRegOfLoad( MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); @@ -740,246 +740,246 @@ bool CombinerHelper::applySextInRegOfLoad( return true; } -bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, - Register &Base, Register &Offset) { - auto &MF = *MI.getParent()->getParent(); - const auto &TLI = *MF.getSubtarget().getTargetLowering(); - -#ifndef NDEBUG - unsigned Opcode = MI.getOpcode(); - assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || - Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); -#endif - - Base = MI.getOperand(1).getReg(); - MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base); - if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) - return false; - - LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI); +bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, + Register &Base, Register &Offset) { + auto &MF = *MI.getParent()->getParent(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + +#ifndef NDEBUG + unsigned Opcode = MI.getOpcode(); + assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || + Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); +#endif + + Base = MI.getOperand(1).getReg(); + MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base); + if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) + return false; + + LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI); // FIXME: The following use traversal needs a bail out for patholigical cases. - for (auto &Use : MRI.use_nodbg_instructions(Base)) { - if (Use.getOpcode() != TargetOpcode::G_PTR_ADD) - continue; - - Offset = Use.getOperand(2).getReg(); - if (!ForceLegalIndexing && - !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) { - LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: " - << Use); - continue; - } - - // Make sure the offset calculation is before the potentially indexed op. - // FIXME: we really care about dependency here. The offset calculation might - // be movable. - MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset); - if (!OffsetDef || !dominates(*OffsetDef, MI)) { - LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: " - << Use); - continue; - } - - // FIXME: check whether all uses of Base are load/store with foldable - // addressing modes. If so, using the normal addr-modes is better than - // forming an indexed one. 
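// A minimal standalone sketch (plain C++, not the GlobalISel API) of what the
// post-indexed form being searched for here computes: the access goes through
// the unmodified base, and the advanced address becomes a second def, folding
// the separate G_PTR_ADD away. The name and signature are illustrative only.
#include <cstddef>
#include <cstdint>
#include <utility>

// Returns {loaded value, written-back address}, i.e. both defs of a
// hypothetical post-indexed load (G_INDEXED_LOAD with IsPre == 0).
static std::pair<uint32_t, const uint32_t *>
postIndexedLoad(const uint32_t *Base, std::ptrdiff_t Offset) {
  uint32_t Val = *Base;        // memory access through the old address
  return {Val, Base + Offset}; // writeback: the folded G_PTR_ADD result
}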
- - bool MemOpDominatesAddrUses = true; - for (auto &PtrAddUse : - MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) { - if (!dominates(MI, PtrAddUse)) { - MemOpDominatesAddrUses = false; - break; - } - } - - if (!MemOpDominatesAddrUses) { - LLVM_DEBUG( - dbgs() << " Ignoring candidate as memop does not dominate uses: " - << Use); - continue; - } - - LLVM_DEBUG(dbgs() << " Found match: " << Use); - Addr = Use.getOperand(0).getReg(); - return true; - } - - return false; -} - -bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, - Register &Base, Register &Offset) { - auto &MF = *MI.getParent()->getParent(); - const auto &TLI = *MF.getSubtarget().getTargetLowering(); - -#ifndef NDEBUG - unsigned Opcode = MI.getOpcode(); - assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || - Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); -#endif - - Addr = MI.getOperand(1).getReg(); - MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI); - if (!AddrDef || MRI.hasOneNonDBGUse(Addr)) - return false; - - Base = AddrDef->getOperand(1).getReg(); - Offset = AddrDef->getOperand(2).getReg(); - - LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI); - - if (!ForceLegalIndexing && - !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) { - LLVM_DEBUG(dbgs() << " Skipping, not legal for target"); - return false; - } - - MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); - if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { - LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway."); - return false; - } - - if (MI.getOpcode() == TargetOpcode::G_STORE) { - // Would require a copy. - if (Base == MI.getOperand(0).getReg()) { - LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway."); - return false; - } - - // We're expecting one use of Addr in MI, but it could also be the - // value stored, which isn't actually dominated by the instruction. - if (MI.getOperand(0).getReg() == Addr) { - LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses"); - return false; - } - } - - // FIXME: check whether all uses of the base pointer are constant PtrAdds. - // That might allow us to end base's liveness here by adjusting the constant. - - for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) { - if (!dominates(MI, UseMI)) { - LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses."); - return false; - } - } - - return true; -} - -bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) { - IndexedLoadStoreMatchInfo MatchInfo; - if (matchCombineIndexedLoadStore(MI, MatchInfo)) { - applyCombineIndexedLoadStore(MI, MatchInfo); - return true; - } - return false; -} - -bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { - unsigned Opcode = MI.getOpcode(); - if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD && - Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE) - return false; - + for (auto &Use : MRI.use_nodbg_instructions(Base)) { + if (Use.getOpcode() != TargetOpcode::G_PTR_ADD) + continue; + + Offset = Use.getOperand(2).getReg(); + if (!ForceLegalIndexing && + !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) { + LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: " + << Use); + continue; + } + + // Make sure the offset calculation is before the potentially indexed op. + // FIXME: we really care about dependency here. 
The offset calculation might + // be movable. + MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset); + if (!OffsetDef || !dominates(*OffsetDef, MI)) { + LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: " + << Use); + continue; + } + + // FIXME: check whether all uses of Base are load/store with foldable + // addressing modes. If so, using the normal addr-modes is better than + // forming an indexed one. + + bool MemOpDominatesAddrUses = true; + for (auto &PtrAddUse : + MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) { + if (!dominates(MI, PtrAddUse)) { + MemOpDominatesAddrUses = false; + break; + } + } + + if (!MemOpDominatesAddrUses) { + LLVM_DEBUG( + dbgs() << " Ignoring candidate as memop does not dominate uses: " + << Use); + continue; + } + + LLVM_DEBUG(dbgs() << " Found match: " << Use); + Addr = Use.getOperand(0).getReg(); + return true; + } + + return false; +} + +bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, + Register &Base, Register &Offset) { + auto &MF = *MI.getParent()->getParent(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + +#ifndef NDEBUG + unsigned Opcode = MI.getOpcode(); + assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || + Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); +#endif + + Addr = MI.getOperand(1).getReg(); + MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI); + if (!AddrDef || MRI.hasOneNonDBGUse(Addr)) + return false; + + Base = AddrDef->getOperand(1).getReg(); + Offset = AddrDef->getOperand(2).getReg(); + + LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI); + + if (!ForceLegalIndexing && + !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) { + LLVM_DEBUG(dbgs() << " Skipping, not legal for target"); + return false; + } + + MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); + if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { + LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway."); + return false; + } + + if (MI.getOpcode() == TargetOpcode::G_STORE) { + // Would require a copy. + if (Base == MI.getOperand(0).getReg()) { + LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway."); + return false; + } + + // We're expecting one use of Addr in MI, but it could also be the + // value stored, which isn't actually dominated by the instruction. + if (MI.getOperand(0).getReg() == Addr) { + LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses"); + return false; + } + } + + // FIXME: check whether all uses of the base pointer are constant PtrAdds. + // That might allow us to end base's liveness here by adjusting the constant. 
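// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// pre-indexed counterpart checked here: the written-back address is computed
// first and the access itself uses it. This is also why a store whose stored
// value is the base register is rejected above; the old base stays live past
// the writeback and would need a copy. The name is illustrative only.
#include <cstddef>
#include <cstdint>
#include <utility>

static std::pair<uint32_t, const uint32_t *>
preIndexedLoad(const uint32_t *Base, std::ptrdiff_t Offset) {
  const uint32_t *Addr = Base + Offset; // writeback address, computed up front
  return {*Addr, Addr};                 // the access uses the new address
}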
+ + for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) { + if (!dominates(MI, UseMI)) { + LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses."); + return false; + } + } + + return true; +} + +bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) { + IndexedLoadStoreMatchInfo MatchInfo; + if (matchCombineIndexedLoadStore(MI, MatchInfo)) { + applyCombineIndexedLoadStore(MI, MatchInfo); + return true; + } + return false; +} + +bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { + unsigned Opcode = MI.getOpcode(); + if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD && + Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE) + return false; + // For now, no targets actually support these opcodes so don't waste time // running these unless we're forced to for testing. if (!ForceLegalIndexing) return false; - MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, - MatchInfo.Offset); - if (!MatchInfo.IsPre && - !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, - MatchInfo.Offset)) - return false; - - return true; -} - -void CombinerHelper::applyCombineIndexedLoadStore( - MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { - MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr); - MachineIRBuilder MIRBuilder(MI); - unsigned Opcode = MI.getOpcode(); - bool IsStore = Opcode == TargetOpcode::G_STORE; - unsigned NewOpcode; - switch (Opcode) { - case TargetOpcode::G_LOAD: - NewOpcode = TargetOpcode::G_INDEXED_LOAD; - break; - case TargetOpcode::G_SEXTLOAD: - NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD; - break; - case TargetOpcode::G_ZEXTLOAD: - NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD; - break; - case TargetOpcode::G_STORE: - NewOpcode = TargetOpcode::G_INDEXED_STORE; - break; - default: - llvm_unreachable("Unknown load/store opcode"); - } - - auto MIB = MIRBuilder.buildInstr(NewOpcode); - if (IsStore) { - MIB.addDef(MatchInfo.Addr); - MIB.addUse(MI.getOperand(0).getReg()); - } else { - MIB.addDef(MI.getOperand(0).getReg()); - MIB.addDef(MatchInfo.Addr); - } - - MIB.addUse(MatchInfo.Base); - MIB.addUse(MatchInfo.Offset); - MIB.addImm(MatchInfo.IsPre); - MI.eraseFromParent(); - AddrDef.eraseFromParent(); - - LLVM_DEBUG(dbgs() << " Combinined to indexed operation"); -} - + MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, + MatchInfo.Offset); + if (!MatchInfo.IsPre && + !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, + MatchInfo.Offset)) + return false; + + return true; +} + +void CombinerHelper::applyCombineIndexedLoadStore( + MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { + MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr); + MachineIRBuilder MIRBuilder(MI); + unsigned Opcode = MI.getOpcode(); + bool IsStore = Opcode == TargetOpcode::G_STORE; + unsigned NewOpcode; + switch (Opcode) { + case TargetOpcode::G_LOAD: + NewOpcode = TargetOpcode::G_INDEXED_LOAD; + break; + case TargetOpcode::G_SEXTLOAD: + NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD; + break; + case TargetOpcode::G_ZEXTLOAD: + NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD; + break; + case TargetOpcode::G_STORE: + NewOpcode = TargetOpcode::G_INDEXED_STORE; + break; + default: + llvm_unreachable("Unknown load/store opcode"); + } + + auto MIB = MIRBuilder.buildInstr(NewOpcode); + if (IsStore) { + MIB.addDef(MatchInfo.Addr); + MIB.addUse(MI.getOperand(0).getReg()); + } else { + MIB.addDef(MI.getOperand(0).getReg()); 
+ MIB.addDef(MatchInfo.Addr); + } + + MIB.addUse(MatchInfo.Base); + MIB.addUse(MatchInfo.Offset); + MIB.addImm(MatchInfo.IsPre); + MI.eraseFromParent(); + AddrDef.eraseFromParent(); + + LLVM_DEBUG(dbgs() << " Combinined to indexed operation"); +} + bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI) { - if (MI.getOpcode() != TargetOpcode::G_BR) - return false; - - // Try to match the following: - // bb1: - // G_BRCOND %c1, %bb2 - // G_BR %bb3 - // bb2: - // ... - // bb3: - - // The above pattern does not have a fall through to the successor bb2, always - // resulting in a branch no matter which path is taken. Here we try to find - // and replace that pattern with conditional branch to bb3 and otherwise + if (MI.getOpcode() != TargetOpcode::G_BR) + return false; + + // Try to match the following: + // bb1: + // G_BRCOND %c1, %bb2 + // G_BR %bb3 + // bb2: + // ... + // bb3: + + // The above pattern does not have a fall through to the successor bb2, always + // resulting in a branch no matter which path is taken. Here we try to find + // and replace that pattern with conditional branch to bb3 and otherwise // fallthrough to bb2. This is generally better for branch predictors. - - MachineBasicBlock *MBB = MI.getParent(); - MachineBasicBlock::iterator BrIt(MI); - if (BrIt == MBB->begin()) - return false; - assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator"); - - MachineInstr *BrCond = &*std::prev(BrIt); - if (BrCond->getOpcode() != TargetOpcode::G_BRCOND) - return false; - + + MachineBasicBlock *MBB = MI.getParent(); + MachineBasicBlock::iterator BrIt(MI); + if (BrIt == MBB->begin()) + return false; + assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator"); + + MachineInstr *BrCond = &*std::prev(BrIt); + if (BrCond->getOpcode() != TargetOpcode::G_BRCOND) + return false; + // Check that the next block is the conditional branch target. Also make sure // that it isn't the same as the G_BR's target (otherwise, this will loop.) MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB(); return BrCondTarget != MI.getOperand(0).getMBB() && MBB->isLayoutSuccessor(BrCondTarget); -} - +} + void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI) { - MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB(); - MachineBasicBlock::iterator BrIt(MI); - MachineInstr *BrCond = &*std::prev(BrIt); - + MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB(); + MachineBasicBlock::iterator BrIt(MI); + MachineInstr *BrCond = &*std::prev(BrIt); + Builder.setInstrAndDebugLoc(*BrCond); LLT Ty = MRI.getType(BrCond->getOperand(0).getReg()); // FIXME: Does int/fp matter for this? If so, we might need to restrict @@ -988,508 +988,508 @@ void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI) { auto True = Builder.buildConstant( Ty, getICmpTrueVal(getTargetLowering(), false, false)); auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True); - + auto *FallthroughBB = BrCond->getOperand(1).getMBB(); Observer.changingInstr(MI); MI.getOperand(0).setMBB(FallthroughBB); Observer.changedInstr(MI); - + // Change the conditional branch to use the inverted condition and // new target block. 
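// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// rewrite applied here: the i1 condition is inverted by XOR-ing it with the
// target's "true" value (shown below for a plain bool; the real code asks
// getICmpTrueVal), and the targets are swapped so the hot path falls through.
#include <cassert>

static bool invertCond(bool Cond) { return Cond ^ true; } // the G_XOR above

int main() {
  // brcond %c, bb2; br bb3  ==>  brcond (xor %c, true), bb3; fallthrough bb2
  assert(invertCond(false) && !invertCond(true));
  return 0;
}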
- Observer.changingInstr(*BrCond); + Observer.changingInstr(*BrCond); BrCond->getOperand(0).setReg(Xor.getReg(0)); - BrCond->getOperand(1).setMBB(BrTarget); - Observer.changedInstr(*BrCond); -} - -static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { - // On Darwin, -Os means optimize for size without hurting performance, so - // only really optimize for size when -Oz (MinSize) is used. - if (MF.getTarget().getTargetTriple().isOSDarwin()) - return MF.getFunction().hasMinSize(); - return MF.getFunction().hasOptSize(); -} - -// Returns a list of types to use for memory op lowering in MemOps. A partial -// port of findOptimalMemOpLowering in TargetLowering. -static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps, - unsigned Limit, const MemOp &Op, - unsigned DstAS, unsigned SrcAS, - const AttributeList &FuncAttributes, - const TargetLowering &TLI) { - if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign()) - return false; - - LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes); - - if (Ty == LLT()) { - // Use the largest scalar type whose alignment constraints are satisfied. - // We only need to check DstAlign here as SrcAlign is always greater or - // equal to DstAlign (or zero). - Ty = LLT::scalar(64); - if (Op.isFixedDstAlign()) - while (Op.getDstAlign() < Ty.getSizeInBytes() && - !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign())) - Ty = LLT::scalar(Ty.getSizeInBytes()); - assert(Ty.getSizeInBits() > 0 && "Could not find valid type"); - // FIXME: check for the largest legal type we can load/store to. - } - - unsigned NumMemOps = 0; - uint64_t Size = Op.size(); - while (Size) { - unsigned TySize = Ty.getSizeInBytes(); - while (TySize > Size) { - // For now, only use non-vector load / store's for the left-over pieces. - LLT NewTy = Ty; - // FIXME: check for mem op safety and legality of the types. Not all of - // SDAGisms map cleanly to GISel concepts. - if (NewTy.isVector()) - NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32); - NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1)); - unsigned NewTySize = NewTy.getSizeInBytes(); - assert(NewTySize > 0 && "Could not find appropriate type"); - - // If the new LLT cannot cover all of the remaining bits, then consider - // issuing a (or a pair of) unaligned and overlapping load / store. - bool Fast; - // Need to get a VT equivalent for allowMisalignedMemoryAccesses(). - MVT VT = getMVTForLLT(Ty); - if (NumMemOps && Op.allowOverlap() && NewTySize < Size && - TLI.allowsMisalignedMemoryAccesses( - VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0, - MachineMemOperand::MONone, &Fast) && - Fast) - TySize = Size; - else { - Ty = NewTy; - TySize = NewTySize; - } - } - - if (++NumMemOps > Limit) - return false; - - MemOps.push_back(Ty); - Size -= TySize; - } - - return true; -} - -static Type *getTypeForLLT(LLT Ty, LLVMContext &C) { - if (Ty.isVector()) - return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()), - Ty.getNumElements()); - return IntegerType::get(C, Ty.getSizeInBits()); -} - -// Get a vectorized representation of the memset value operand, GISel edition. 
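// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// greedy shape of findGISelOptimalMemOpLowering above, with LLTs reduced to
// byte widths and the overlap/misalignment refinements omitted: start from
// the widest store considered, halve until a piece fits, and give up once the
// op count exceeds the limit. chunkMemOp is an illustrative name.
#include <cstdint>
#include <vector>

static bool chunkMemOp(std::vector<unsigned> &Chunks, uint64_t Size,
                       unsigned WidestBytes, unsigned Limit) {
  unsigned Width = WidestBytes; // e.g. 8 bytes for an s64 store
  while (Size) {
    while (Width > Size)
      Width /= 2;               // narrower ops for the left-over pieces
    if (Chunks.size() == Limit)
      return false;             // too many ops; fall back to a libcall
    Chunks.push_back(Width);
    Size -= Width;
  }
  return true;
}
// chunkMemOp(C, 13, 8, 8) fills C with {8, 4, 1}: one s64, one s32, one s8 op.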
-static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) { - MachineRegisterInfo &MRI = *MIB.getMRI(); - unsigned NumBits = Ty.getScalarSizeInBits(); - auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); - if (!Ty.isVector() && ValVRegAndVal) { + BrCond->getOperand(1).setMBB(BrTarget); + Observer.changedInstr(*BrCond); +} + +static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { + // On Darwin, -Os means optimize for size without hurting performance, so + // only really optimize for size when -Oz (MinSize) is used. + if (MF.getTarget().getTargetTriple().isOSDarwin()) + return MF.getFunction().hasMinSize(); + return MF.getFunction().hasOptSize(); +} + +// Returns a list of types to use for memory op lowering in MemOps. A partial +// port of findOptimalMemOpLowering in TargetLowering. +static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps, + unsigned Limit, const MemOp &Op, + unsigned DstAS, unsigned SrcAS, + const AttributeList &FuncAttributes, + const TargetLowering &TLI) { + if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign()) + return false; + + LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes); + + if (Ty == LLT()) { + // Use the largest scalar type whose alignment constraints are satisfied. + // We only need to check DstAlign here as SrcAlign is always greater or + // equal to DstAlign (or zero). + Ty = LLT::scalar(64); + if (Op.isFixedDstAlign()) + while (Op.getDstAlign() < Ty.getSizeInBytes() && + !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign())) + Ty = LLT::scalar(Ty.getSizeInBytes()); + assert(Ty.getSizeInBits() > 0 && "Could not find valid type"); + // FIXME: check for the largest legal type we can load/store to. + } + + unsigned NumMemOps = 0; + uint64_t Size = Op.size(); + while (Size) { + unsigned TySize = Ty.getSizeInBytes(); + while (TySize > Size) { + // For now, only use non-vector load / store's for the left-over pieces. + LLT NewTy = Ty; + // FIXME: check for mem op safety and legality of the types. Not all of + // SDAGisms map cleanly to GISel concepts. + if (NewTy.isVector()) + NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32); + NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1)); + unsigned NewTySize = NewTy.getSizeInBytes(); + assert(NewTySize > 0 && "Could not find appropriate type"); + + // If the new LLT cannot cover all of the remaining bits, then consider + // issuing a (or a pair of) unaligned and overlapping load / store. + bool Fast; + // Need to get a VT equivalent for allowMisalignedMemoryAccesses(). + MVT VT = getMVTForLLT(Ty); + if (NumMemOps && Op.allowOverlap() && NewTySize < Size && + TLI.allowsMisalignedMemoryAccesses( + VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0, + MachineMemOperand::MONone, &Fast) && + Fast) + TySize = Size; + else { + Ty = NewTy; + TySize = NewTySize; + } + } + + if (++NumMemOps > Limit) + return false; + + MemOps.push_back(Ty); + Size -= TySize; + } + + return true; +} + +static Type *getTypeForLLT(LLT Ty, LLVMContext &C) { + if (Ty.isVector()) + return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()), + Ty.getNumElements()); + return IntegerType::get(C, Ty.getSizeInBits()); +} + +// Get a vectorized representation of the memset value operand, GISel edition. 
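// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// replication trick in getMemsetValue below: zero-extend the memset byte and
// multiply by the 0x0101...01 magic constant to copy it into every byte lane.
#include <cassert>
#include <cstdint>

static uint64_t splatByte(uint8_t V) {
  return UINT64_C(0x0101010101010101) * V; // the G_ZEXT + G_MUL pair
}

int main() {
  assert(splatByte(0xAB) == UINT64_C(0xABABABABABABABAB));
  assert(splatByte(0) == 0); // the zero case needs no multiply at all
  return 0;
}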
+static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) { + MachineRegisterInfo &MRI = *MIB.getMRI(); + unsigned NumBits = Ty.getScalarSizeInBits(); + auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); + if (!Ty.isVector() && ValVRegAndVal) { APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8); - APInt SplatVal = APInt::getSplat(NumBits, Scalar); - return MIB.buildConstant(Ty, SplatVal).getReg(0); - } - - // Extend the byte value to the larger type, and then multiply by a magic - // value 0x010101... in order to replicate it across every byte. - // Unless it's zero, in which case just emit a larger G_CONSTANT 0. - if (ValVRegAndVal && ValVRegAndVal->Value == 0) { - return MIB.buildConstant(Ty, 0).getReg(0); - } - - LLT ExtType = Ty.getScalarType(); - auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val); - if (NumBits > 8) { - APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); - auto MagicMI = MIB.buildConstant(ExtType, Magic); - Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0); - } - - // For vector types create a G_BUILD_VECTOR. - if (Ty.isVector()) - Val = MIB.buildSplatVector(Ty, Val).getReg(0); - - return Val; -} - -bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, - Register Val, unsigned KnownLen, - Align Alignment, bool IsVolatile) { - auto &MF = *MI.getParent()->getParent(); - const auto &TLI = *MF.getSubtarget().getTargetLowering(); - auto &DL = MF.getDataLayout(); - LLVMContext &C = MF.getFunction().getContext(); - - assert(KnownLen != 0 && "Have a zero length memset length!"); - - bool DstAlignCanChange = false; - MachineFrameInfo &MFI = MF.getFrameInfo(); - bool OptSize = shouldLowerMemFuncForSize(MF); - - MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); - if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) - DstAlignCanChange = true; - - unsigned Limit = TLI.getMaxStoresPerMemset(OptSize); - std::vector<LLT> MemOps; - - const auto &DstMMO = **MI.memoperands_begin(); - MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); - - auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); - bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0; - - if (!findGISelOptimalMemOpLowering(MemOps, Limit, - MemOp::Set(KnownLen, DstAlignCanChange, - Alignment, - /*IsZeroMemset=*/IsZeroVal, - /*IsVolatile=*/IsVolatile), - DstPtrInfo.getAddrSpace(), ~0u, - MF.getFunction().getAttributes(), TLI)) - return false; - - if (DstAlignCanChange) { - // Get an estimate of the type from the LLT. - Type *IRTy = getTypeForLLT(MemOps[0], C); - Align NewAlign = DL.getABITypeAlign(IRTy); - if (NewAlign > Alignment) { - Alignment = NewAlign; - unsigned FI = FIDef->getOperand(1).getIndex(); - // Give the stack frame object a larger alignment if needed. - if (MFI.getObjectAlign(FI) < Alignment) - MFI.setObjectAlignment(FI, Alignment); - } - } - - MachineIRBuilder MIB(MI); - // Find the largest store and generate the bit pattern for it. - LLT LargestTy = MemOps[0]; - for (unsigned i = 1; i < MemOps.size(); i++) - if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits()) - LargestTy = MemOps[i]; - - // The memset stored value is always defined as an s8, so in order to make it - // work with larger store types we need to repeat the bit pattern across the - // wider type. - Register MemSetValue = getMemsetValue(Val, LargestTy, MIB); - - if (!MemSetValue) - return false; - - // Generate the stores. 
For each store type in the list, we generate the - // matching store of that type to the destination address. - LLT PtrTy = MRI.getType(Dst); - unsigned DstOff = 0; - unsigned Size = KnownLen; - for (unsigned I = 0; I < MemOps.size(); I++) { - LLT Ty = MemOps[I]; - unsigned TySize = Ty.getSizeInBytes(); - if (TySize > Size) { - // Issuing an unaligned load / store pair that overlaps with the previous - // pair. Adjust the offset accordingly. - assert(I == MemOps.size() - 1 && I != 0); - DstOff -= TySize - Size; - } - - // If this store is smaller than the largest store see whether we can get - // the smaller value for free with a truncate. - Register Value = MemSetValue; - if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) { - MVT VT = getMVTForLLT(Ty); - MVT LargestVT = getMVTForLLT(LargestTy); - if (!LargestTy.isVector() && !Ty.isVector() && - TLI.isTruncateFree(LargestVT, VT)) - Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0); - else - Value = getMemsetValue(Val, Ty, MIB); - if (!Value) - return false; - } - - auto *StoreMMO = - MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes()); - - Register Ptr = Dst; - if (DstOff != 0) { - auto Offset = - MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff); - Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); - } - - MIB.buildStore(Value, Ptr, *StoreMMO); - DstOff += Ty.getSizeInBytes(); - Size -= TySize; - } - - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst, - Register Src, unsigned KnownLen, - Align DstAlign, Align SrcAlign, - bool IsVolatile) { - auto &MF = *MI.getParent()->getParent(); - const auto &TLI = *MF.getSubtarget().getTargetLowering(); - auto &DL = MF.getDataLayout(); - LLVMContext &C = MF.getFunction().getContext(); - - assert(KnownLen != 0 && "Have a zero length memcpy length!"); - - bool DstAlignCanChange = false; - MachineFrameInfo &MFI = MF.getFrameInfo(); - bool OptSize = shouldLowerMemFuncForSize(MF); - Align Alignment = commonAlignment(DstAlign, SrcAlign); - - MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); - if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) - DstAlignCanChange = true; - - // FIXME: infer better src pointer alignment like SelectionDAG does here. - // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining - // if the memcpy is in a tail call position. - - unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize); - std::vector<LLT> MemOps; - - const auto &DstMMO = **MI.memoperands_begin(); - const auto &SrcMMO = **std::next(MI.memoperands_begin()); - MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); - MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo(); - - if (!findGISelOptimalMemOpLowering( - MemOps, Limit, - MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign, - IsVolatile), - DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), - MF.getFunction().getAttributes(), TLI)) - return false; - - if (DstAlignCanChange) { - // Get an estimate of the type from the LLT. - Type *IRTy = getTypeForLLT(MemOps[0], C); - Align NewAlign = DL.getABITypeAlign(IRTy); - - // Don't promote to an alignment that would require dynamic stack - // realignment. 
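// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// offset rewind used for the last piece in the memset store loop above
// ("DstOff -= TySize - Size"): when the final chunk is wider than what
// remains, the store is slid back so it overlaps the previous one instead of
// writing past the end. For a 13-byte memset with 8-byte stores this places
// stores at offsets 0 and 5, overlapping on bytes 5..7.
#include <cassert>

static unsigned tailStoreOffset(unsigned CurOff, unsigned TySize,
                                unsigned Remaining) {
  return CurOff - (TySize - Remaining); // rewind by the overhang
}

int main() {
  assert(tailStoreOffset(/*CurOff=*/8, /*TySize=*/8, /*Remaining=*/5) == 5);
  return 0;
}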
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); - if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) - NewAlign = NewAlign / 2; - - if (NewAlign > Alignment) { - Alignment = NewAlign; - unsigned FI = FIDef->getOperand(1).getIndex(); - // Give the stack frame object a larger alignment if needed. - if (MFI.getObjectAlign(FI) < Alignment) - MFI.setObjectAlignment(FI, Alignment); - } - } - - LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n"); - - MachineIRBuilder MIB(MI); - // Now we need to emit a pair of load and stores for each of the types we've - // collected. I.e. for each type, generate a load from the source pointer of - // that type width, and then generate a corresponding store to the dest buffer - // of that value loaded. This can result in a sequence of loads and stores - // mixed types, depending on what the target specifies as good types to use. - unsigned CurrOffset = 0; - LLT PtrTy = MRI.getType(Src); - unsigned Size = KnownLen; - for (auto CopyTy : MemOps) { - // Issuing an unaligned load / store pair that overlaps with the previous - // pair. Adjust the offset accordingly. - if (CopyTy.getSizeInBytes() > Size) - CurrOffset -= CopyTy.getSizeInBytes() - Size; - - // Construct MMOs for the accesses. - auto *LoadMMO = - MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes()); - auto *StoreMMO = - MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes()); - - // Create the load. - Register LoadPtr = Src; - Register Offset; - if (CurrOffset != 0) { - Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset) - .getReg(0); - LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); - } - auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO); - - // Create the store. - Register StorePtr = - CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); - MIB.buildStore(LdVal, StorePtr, *StoreMMO); - CurrOffset += CopyTy.getSizeInBytes(); - Size -= CopyTy.getSizeInBytes(); - } - - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst, - Register Src, unsigned KnownLen, - Align DstAlign, Align SrcAlign, - bool IsVolatile) { - auto &MF = *MI.getParent()->getParent(); - const auto &TLI = *MF.getSubtarget().getTargetLowering(); - auto &DL = MF.getDataLayout(); - LLVMContext &C = MF.getFunction().getContext(); - - assert(KnownLen != 0 && "Have a zero length memmove length!"); - - bool DstAlignCanChange = false; - MachineFrameInfo &MFI = MF.getFrameInfo(); - bool OptSize = shouldLowerMemFuncForSize(MF); - Align Alignment = commonAlignment(DstAlign, SrcAlign); - - MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); - if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) - DstAlignCanChange = true; - - unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize); - std::vector<LLT> MemOps; - - const auto &DstMMO = **MI.memoperands_begin(); - const auto &SrcMMO = **std::next(MI.memoperands_begin()); - MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); - MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo(); - - // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due - // to a bug in it's findOptimalMemOpLowering implementation. For now do the - // same thing here. 
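// A minimal standalone sketch (plain C++, not the GlobalISel API) of why
// optimizeMemmove issues every load before any store: with overlapping
// buffers, storing chunk 0 before loading chunk 1 would clobber source bytes,
// so all loaded values are buffered first. moveBytes is an illustrative name.
#include <cassert>
#include <cstdint>
#include <vector>

static void moveBytes(uint8_t *Dst, const uint8_t *Src, unsigned Len) {
  std::vector<uint8_t> Vals(Src, Src + Len); // phase 1: all the loads
  for (unsigned I = 0; I != Len; ++I)        // phase 2: all the stores
    Dst[I] = Vals[I];
}

int main() {
  uint8_t Buf[] = {1, 2, 3, 4, 5};
  moveBytes(Buf + 1, Buf, 4); // overlapping move: shift right by one byte
  assert(Buf[1] == 1 && Buf[4] == 4);
  return 0;
}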
- if (!findGISelOptimalMemOpLowering( - MemOps, Limit, - MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign, - /*IsVolatile*/ true), - DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), - MF.getFunction().getAttributes(), TLI)) - return false; - - if (DstAlignCanChange) { - // Get an estimate of the type from the LLT. - Type *IRTy = getTypeForLLT(MemOps[0], C); - Align NewAlign = DL.getABITypeAlign(IRTy); - - // Don't promote to an alignment that would require dynamic stack - // realignment. - const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); - if (!TRI->needsStackRealignment(MF)) - while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) - NewAlign = NewAlign / 2; - - if (NewAlign > Alignment) { - Alignment = NewAlign; - unsigned FI = FIDef->getOperand(1).getIndex(); - // Give the stack frame object a larger alignment if needed. - if (MFI.getObjectAlign(FI) < Alignment) - MFI.setObjectAlignment(FI, Alignment); - } - } - - LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n"); - - MachineIRBuilder MIB(MI); - // Memmove requires that we perform the loads first before issuing the stores. - // Apart from that, this loop is pretty much doing the same thing as the - // memcpy codegen function. - unsigned CurrOffset = 0; - LLT PtrTy = MRI.getType(Src); - SmallVector<Register, 16> LoadVals; - for (auto CopyTy : MemOps) { - // Construct MMO for the load. - auto *LoadMMO = - MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes()); - - // Create the load. - Register LoadPtr = Src; - if (CurrOffset != 0) { - auto Offset = - MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); - LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); - } - LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0)); - CurrOffset += CopyTy.getSizeInBytes(); - } - - CurrOffset = 0; - for (unsigned I = 0; I < MemOps.size(); ++I) { - LLT CopyTy = MemOps[I]; - // Now store the values loaded. - auto *StoreMMO = - MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes()); - - Register StorePtr = Dst; - if (CurrOffset != 0) { - auto Offset = - MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); - StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); - } - MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO); - CurrOffset += CopyTy.getSizeInBytes(); - } - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { + APInt SplatVal = APInt::getSplat(NumBits, Scalar); + return MIB.buildConstant(Ty, SplatVal).getReg(0); + } + + // Extend the byte value to the larger type, and then multiply by a magic + // value 0x010101... in order to replicate it across every byte. + // Unless it's zero, in which case just emit a larger G_CONSTANT 0. + if (ValVRegAndVal && ValVRegAndVal->Value == 0) { + return MIB.buildConstant(Ty, 0).getReg(0); + } + + LLT ExtType = Ty.getScalarType(); + auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val); + if (NumBits > 8) { + APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); + auto MagicMI = MIB.buildConstant(ExtType, Magic); + Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0); + } + + // For vector types create a G_BUILD_VECTOR. 
+ if (Ty.isVector()) + Val = MIB.buildSplatVector(Ty, Val).getReg(0); + + return Val; +} + +bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, + Register Val, unsigned KnownLen, + Align Alignment, bool IsVolatile) { + auto &MF = *MI.getParent()->getParent(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + auto &DL = MF.getDataLayout(); + LLVMContext &C = MF.getFunction().getContext(); + + assert(KnownLen != 0 && "Have a zero length memset length!"); + + bool DstAlignCanChange = false; + MachineFrameInfo &MFI = MF.getFrameInfo(); + bool OptSize = shouldLowerMemFuncForSize(MF); + + MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); + if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) + DstAlignCanChange = true; + + unsigned Limit = TLI.getMaxStoresPerMemset(OptSize); + std::vector<LLT> MemOps; + + const auto &DstMMO = **MI.memoperands_begin(); + MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); + + auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); + bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0; + + if (!findGISelOptimalMemOpLowering(MemOps, Limit, + MemOp::Set(KnownLen, DstAlignCanChange, + Alignment, + /*IsZeroMemset=*/IsZeroVal, + /*IsVolatile=*/IsVolatile), + DstPtrInfo.getAddrSpace(), ~0u, + MF.getFunction().getAttributes(), TLI)) + return false; + + if (DstAlignCanChange) { + // Get an estimate of the type from the LLT. + Type *IRTy = getTypeForLLT(MemOps[0], C); + Align NewAlign = DL.getABITypeAlign(IRTy); + if (NewAlign > Alignment) { + Alignment = NewAlign; + unsigned FI = FIDef->getOperand(1).getIndex(); + // Give the stack frame object a larger alignment if needed. + if (MFI.getObjectAlign(FI) < Alignment) + MFI.setObjectAlignment(FI, Alignment); + } + } + + MachineIRBuilder MIB(MI); + // Find the largest store and generate the bit pattern for it. + LLT LargestTy = MemOps[0]; + for (unsigned i = 1; i < MemOps.size(); i++) + if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits()) + LargestTy = MemOps[i]; + + // The memset stored value is always defined as an s8, so in order to make it + // work with larger store types we need to repeat the bit pattern across the + // wider type. + Register MemSetValue = getMemsetValue(Val, LargestTy, MIB); + + if (!MemSetValue) + return false; + + // Generate the stores. For each store type in the list, we generate the + // matching store of that type to the destination address. + LLT PtrTy = MRI.getType(Dst); + unsigned DstOff = 0; + unsigned Size = KnownLen; + for (unsigned I = 0; I < MemOps.size(); I++) { + LLT Ty = MemOps[I]; + unsigned TySize = Ty.getSizeInBytes(); + if (TySize > Size) { + // Issuing an unaligned load / store pair that overlaps with the previous + // pair. Adjust the offset accordingly. + assert(I == MemOps.size() - 1 && I != 0); + DstOff -= TySize - Size; + } + + // If this store is smaller than the largest store see whether we can get + // the smaller value for free with a truncate. 
+ Register Value = MemSetValue; + if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) { + MVT VT = getMVTForLLT(Ty); + MVT LargestVT = getMVTForLLT(LargestTy); + if (!LargestTy.isVector() && !Ty.isVector() && + TLI.isTruncateFree(LargestVT, VT)) + Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0); + else + Value = getMemsetValue(Val, Ty, MIB); + if (!Value) + return false; + } + + auto *StoreMMO = + MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes()); + + Register Ptr = Dst; + if (DstOff != 0) { + auto Offset = + MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff); + Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); + } + + MIB.buildStore(Value, Ptr, *StoreMMO); + DstOff += Ty.getSizeInBytes(); + Size -= TySize; + } + + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst, + Register Src, unsigned KnownLen, + Align DstAlign, Align SrcAlign, + bool IsVolatile) { + auto &MF = *MI.getParent()->getParent(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + auto &DL = MF.getDataLayout(); + LLVMContext &C = MF.getFunction().getContext(); + + assert(KnownLen != 0 && "Have a zero length memcpy length!"); + + bool DstAlignCanChange = false; + MachineFrameInfo &MFI = MF.getFrameInfo(); + bool OptSize = shouldLowerMemFuncForSize(MF); + Align Alignment = commonAlignment(DstAlign, SrcAlign); + + MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); + if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) + DstAlignCanChange = true; + + // FIXME: infer better src pointer alignment like SelectionDAG does here. + // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining + // if the memcpy is in a tail call position. + + unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize); + std::vector<LLT> MemOps; + + const auto &DstMMO = **MI.memoperands_begin(); + const auto &SrcMMO = **std::next(MI.memoperands_begin()); + MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); + MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo(); + + if (!findGISelOptimalMemOpLowering( + MemOps, Limit, + MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign, + IsVolatile), + DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), + MF.getFunction().getAttributes(), TLI)) + return false; + + if (DstAlignCanChange) { + // Get an estimate of the type from the LLT. + Type *IRTy = getTypeForLLT(MemOps[0], C); + Align NewAlign = DL.getABITypeAlign(IRTy); + + // Don't promote to an alignment that would require dynamic stack + // realignment. + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + if (!TRI->needsStackRealignment(MF)) + while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) + NewAlign = NewAlign / 2; + + if (NewAlign > Alignment) { + Alignment = NewAlign; + unsigned FI = FIDef->getOperand(1).getIndex(); + // Give the stack frame object a larger alignment if needed. + if (MFI.getObjectAlign(FI) < Alignment) + MFI.setObjectAlignment(FI, Alignment); + } + } + + LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n"); + + MachineIRBuilder MIB(MI); + // Now we need to emit a pair of load and stores for each of the types we've + // collected. I.e. for each type, generate a load from the source pointer of + // that type width, and then generate a corresponding store to the dest buffer + // of that value loaded. 
This can result in a sequence of loads and stores + // mixed types, depending on what the target specifies as good types to use. + unsigned CurrOffset = 0; + LLT PtrTy = MRI.getType(Src); + unsigned Size = KnownLen; + for (auto CopyTy : MemOps) { + // Issuing an unaligned load / store pair that overlaps with the previous + // pair. Adjust the offset accordingly. + if (CopyTy.getSizeInBytes() > Size) + CurrOffset -= CopyTy.getSizeInBytes() - Size; + + // Construct MMOs for the accesses. + auto *LoadMMO = + MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes()); + auto *StoreMMO = + MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes()); + + // Create the load. + Register LoadPtr = Src; + Register Offset; + if (CurrOffset != 0) { + Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset) + .getReg(0); + LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); + } + auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO); + + // Create the store. + Register StorePtr = + CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); + MIB.buildStore(LdVal, StorePtr, *StoreMMO); + CurrOffset += CopyTy.getSizeInBytes(); + Size -= CopyTy.getSizeInBytes(); + } + + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst, + Register Src, unsigned KnownLen, + Align DstAlign, Align SrcAlign, + bool IsVolatile) { + auto &MF = *MI.getParent()->getParent(); + const auto &TLI = *MF.getSubtarget().getTargetLowering(); + auto &DL = MF.getDataLayout(); + LLVMContext &C = MF.getFunction().getContext(); + + assert(KnownLen != 0 && "Have a zero length memmove length!"); + + bool DstAlignCanChange = false; + MachineFrameInfo &MFI = MF.getFrameInfo(); + bool OptSize = shouldLowerMemFuncForSize(MF); + Align Alignment = commonAlignment(DstAlign, SrcAlign); + + MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI); + if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex())) + DstAlignCanChange = true; + + unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize); + std::vector<LLT> MemOps; + + const auto &DstMMO = **MI.memoperands_begin(); + const auto &SrcMMO = **std::next(MI.memoperands_begin()); + MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); + MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo(); + + // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due + // to a bug in it's findOptimalMemOpLowering implementation. For now do the + // same thing here. + if (!findGISelOptimalMemOpLowering( + MemOps, Limit, + MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign, + /*IsVolatile*/ true), + DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), + MF.getFunction().getAttributes(), TLI)) + return false; + + if (DstAlignCanChange) { + // Get an estimate of the type from the LLT. + Type *IRTy = getTypeForLLT(MemOps[0], C); + Align NewAlign = DL.getABITypeAlign(IRTy); + + // Don't promote to an alignment that would require dynamic stack + // realignment. + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + if (!TRI->needsStackRealignment(MF)) + while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) + NewAlign = NewAlign / 2; + + if (NewAlign > Alignment) { + Alignment = NewAlign; + unsigned FI = FIDef->getOperand(1).getIndex(); + // Give the stack frame object a larger alignment if needed. 
+ if (MFI.getObjectAlign(FI) < Alignment) + MFI.setObjectAlignment(FI, Alignment); + } + } + + LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n"); + + MachineIRBuilder MIB(MI); + // Memmove requires that we perform the loads first before issuing the stores. + // Apart from that, this loop is pretty much doing the same thing as the + // memcpy codegen function. + unsigned CurrOffset = 0; + LLT PtrTy = MRI.getType(Src); + SmallVector<Register, 16> LoadVals; + for (auto CopyTy : MemOps) { + // Construct MMO for the load. + auto *LoadMMO = + MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes()); + + // Create the load. + Register LoadPtr = Src; + if (CurrOffset != 0) { + auto Offset = + MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); + LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); + } + LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0)); + CurrOffset += CopyTy.getSizeInBytes(); + } + + CurrOffset = 0; + for (unsigned I = 0; I < MemOps.size(); ++I) { + LLT CopyTy = MemOps[I]; + // Now store the values loaded. + auto *StoreMMO = + MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes()); + + Register StorePtr = Dst; + if (CurrOffset != 0) { + auto Offset = + MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); + StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); + } + MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO); + CurrOffset += CopyTy.getSizeInBytes(); + } + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { const unsigned Opc = MI.getOpcode(); - // This combine is fairly complex so it's not written with a separate - // matcher function. + // This combine is fairly complex so it's not written with a separate + // matcher function. assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE || Opc == TargetOpcode::G_MEMSET) && "Expected memcpy like instruction"); - - auto MMOIt = MI.memoperands_begin(); - const MachineMemOperand *MemOp = *MMOIt; - bool IsVolatile = MemOp->isVolatile(); - // Don't try to optimize volatile. - if (IsVolatile) - return false; - - Align DstAlign = MemOp->getBaseAlign(); - Align SrcAlign; + + auto MMOIt = MI.memoperands_begin(); + const MachineMemOperand *MemOp = *MMOIt; + bool IsVolatile = MemOp->isVolatile(); + // Don't try to optimize volatile. + if (IsVolatile) + return false; + + Align DstAlign = MemOp->getBaseAlign(); + Align SrcAlign; Register Dst = MI.getOperand(0).getReg(); Register Src = MI.getOperand(1).getReg(); Register Len = MI.getOperand(2).getReg(); - + if (Opc != TargetOpcode::G_MEMSET) { - assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI"); - MemOp = *(++MMOIt); - SrcAlign = MemOp->getBaseAlign(); - } - - // See if this is a constant length copy - auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI); - if (!LenVRegAndVal) - return false; // Leave it to the legalizer to lower it to a libcall. + assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI"); + MemOp = *(++MMOIt); + SrcAlign = MemOp->getBaseAlign(); + } + + // See if this is a constant length copy + auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI); + if (!LenVRegAndVal) + return false; // Leave it to the legalizer to lower it to a libcall. 
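// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// gating policy in tryCombineMemCpyFamily: only constant lengths are inlined,
// zero-length ops are simply erased, and anything above the optional cap is
// left for the legalizer to lower to a libcall. Names are illustrative only.
#include <cstdint>
#include <optional>

enum class MemOpAction { EraseOnly, Inline, Libcall };

static MemOpAction classifyMemOp(std::optional<uint64_t> KnownLen,
                                 uint64_t MaxLen) {
  if (!KnownLen)
    return MemOpAction::Libcall;   // non-constant length
  if (*KnownLen == 0)
    return MemOpAction::EraseOnly; // nothing to copy or set
  if (MaxLen && *KnownLen > MaxLen)
    return MemOpAction::Libcall;   // too large to inline profitably
  return MemOpAction::Inline;
}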
unsigned KnownLen = LenVRegAndVal->Value.getZExtValue(); - - if (KnownLen == 0) { - MI.eraseFromParent(); - return true; - } - - if (MaxLen && KnownLen > MaxLen) - return false; - + + if (KnownLen == 0) { + MI.eraseFromParent(); + return true; + } + + if (MaxLen && KnownLen > MaxLen) + return false; + if (Opc == TargetOpcode::G_MEMCPY) - return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile); + return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile); if (Opc == TargetOpcode::G_MEMMOVE) - return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile); + return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile); if (Opc == TargetOpcode::G_MEMSET) - return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile); - return false; -} - + return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile); + return false; +} + static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, const Register Op, const MachineRegisterInfo &MRI) { @@ -1553,52 +1553,52 @@ bool CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI, return true; } -bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI, - PtrAddChain &MatchInfo) { - // We're trying to match the following pattern: - // %t1 = G_PTR_ADD %base, G_CONSTANT imm1 - // %root = G_PTR_ADD %t1, G_CONSTANT imm2 - // --> - // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2) - - if (MI.getOpcode() != TargetOpcode::G_PTR_ADD) - return false; - - Register Add2 = MI.getOperand(1).getReg(); - Register Imm1 = MI.getOperand(2).getReg(); - auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI); - if (!MaybeImmVal) - return false; - - MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2); - if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD) - return false; - - Register Base = Add2Def->getOperand(1).getReg(); - Register Imm2 = Add2Def->getOperand(2).getReg(); - auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI); - if (!MaybeImm2Val) - return false; - - // Pass the combined immediate to the apply function. +bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI, + PtrAddChain &MatchInfo) { + // We're trying to match the following pattern: + // %t1 = G_PTR_ADD %base, G_CONSTANT imm1 + // %root = G_PTR_ADD %t1, G_CONSTANT imm2 + // --> + // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2) + + if (MI.getOpcode() != TargetOpcode::G_PTR_ADD) + return false; + + Register Add2 = MI.getOperand(1).getReg(); + Register Imm1 = MI.getOperand(2).getReg(); + auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI); + if (!MaybeImmVal) + return false; + + MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2); + if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD) + return false; + + Register Base = Add2Def->getOperand(1).getReg(); + Register Imm2 = Add2Def->getOperand(2).getReg(); + auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI); + if (!MaybeImm2Val) + return false; + + // Pass the combined immediate to the apply function. 
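// A minimal standalone sketch (plain C++, not the GlobalISel API) of the
// reassociation matched here, (p + imm1) + imm2 ==> p + (imm1 + imm2), shown
// on plain pointers. The match only records the base and the folded
// immediate; the apply step rewrites the outer G_PTR_ADD in place.
#include <cassert>
#include <cstdint>

static const char *foldPtrAddChain(const char *Base, int64_t Imm1,
                                   int64_t Imm2) {
  return Base + (Imm1 + Imm2); // one G_PTR_ADD with the combined immediate
}

int main() {
  char Arr[16];
  assert(foldPtrAddChain(Arr, 4, 3) == (Arr + 4) + 3);
  return 0;
}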
MatchInfo.Imm = (MaybeImmVal->Value + MaybeImm2Val->Value).getSExtValue(); - MatchInfo.Base = Base; - return true; -} - -bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI, - PtrAddChain &MatchInfo) { - assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD"); - MachineIRBuilder MIB(MI); - LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg()); - auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm); - Observer.changingInstr(MI); - MI.getOperand(1).setReg(MatchInfo.Base); - MI.getOperand(2).setReg(NewOffset.getReg(0)); - Observer.changedInstr(MI); - return true; -} - + MatchInfo.Base = Base; + return true; +} + +bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI, + PtrAddChain &MatchInfo) { + assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD"); + MachineIRBuilder MIB(MI); + LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg()); + auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm); + Observer.changingInstr(MI); + MI.getOperand(1).setReg(MatchInfo.Base); + MI.getOperand(2).setReg(NewOffset.getReg(0)); + Observer.changedInstr(MI); + return true; +} + bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) { // We're trying to match the following pattern with any of @@ -1794,31 +1794,31 @@ bool CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI, return true; } -bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI, - unsigned &ShiftVal) { - assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); - auto MaybeImmVal = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); +bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI, + unsigned &ShiftVal) { + assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); + auto MaybeImmVal = + getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!MaybeImmVal) - return false; + return false; ShiftVal = MaybeImmVal->Value.exactLogBase2(); return (static_cast<int32_t>(ShiftVal) != -1); -} - -bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI, - unsigned &ShiftVal) { - assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); - MachineIRBuilder MIB(MI); - LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg()); - auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal); - Observer.changingInstr(MI); - MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL)); - MI.getOperand(2).setReg(ShiftCst.getReg(0)); - Observer.changedInstr(MI); - return true; -} - +} + +bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI, + unsigned &ShiftVal) { + assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); + MachineIRBuilder MIB(MI); + LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg()); + auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal); + Observer.changingInstr(MI); + MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL)); + MI.getOperand(2).setReg(ShiftCst.getReg(0)); + Observer.changedInstr(MI); + return true; +} + // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) { @@ -2067,116 +2067,116 @@ bool CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) { return true; } -bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI, - unsigned TargetShiftSize, - unsigned &ShiftVal) { - assert((MI.getOpcode() == TargetOpcode::G_SHL || - MI.getOpcode() == TargetOpcode::G_LSHR || - MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift"); - - LLT 
Ty = MRI.getType(MI.getOperand(0).getReg()); - if (Ty.isVector()) // TODO: - return false; - - // Don't narrow further than the requested size. - unsigned Size = Ty.getSizeInBits(); - if (Size <= TargetShiftSize) - return false; - - auto MaybeImmVal = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); - if (!MaybeImmVal) - return false; - +bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI, + unsigned TargetShiftSize, + unsigned &ShiftVal) { + assert((MI.getOpcode() == TargetOpcode::G_SHL || + MI.getOpcode() == TargetOpcode::G_LSHR || + MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift"); + + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + if (Ty.isVector()) // TODO: + return false; + + // Don't narrow further than the requested size. + unsigned Size = Ty.getSizeInBits(); + if (Size <= TargetShiftSize) + return false; + + auto MaybeImmVal = + getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + if (!MaybeImmVal) + return false; + ShiftVal = MaybeImmVal->Value.getSExtValue(); - return ShiftVal >= Size / 2 && ShiftVal < Size; -} - -bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI, - const unsigned &ShiftVal) { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT Ty = MRI.getType(SrcReg); - unsigned Size = Ty.getSizeInBits(); - unsigned HalfSize = Size / 2; - assert(ShiftVal >= HalfSize); - - LLT HalfTy = LLT::scalar(HalfSize); - - Builder.setInstr(MI); - auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg); - unsigned NarrowShiftAmt = ShiftVal - HalfSize; - - if (MI.getOpcode() == TargetOpcode::G_LSHR) { - Register Narrowed = Unmerge.getReg(1); - - // dst = G_LSHR s64:x, C for C >= 32 - // => - // lo, hi = G_UNMERGE_VALUES x - // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0 - - if (NarrowShiftAmt != 0) { - Narrowed = Builder.buildLShr(HalfTy, Narrowed, - Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); - } - - auto Zero = Builder.buildConstant(HalfTy, 0); - Builder.buildMerge(DstReg, { Narrowed, Zero }); - } else if (MI.getOpcode() == TargetOpcode::G_SHL) { - Register Narrowed = Unmerge.getReg(0); - // dst = G_SHL s64:x, C for C >= 32 - // => - // lo, hi = G_UNMERGE_VALUES x - // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32) - if (NarrowShiftAmt != 0) { - Narrowed = Builder.buildShl(HalfTy, Narrowed, - Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); - } - - auto Zero = Builder.buildConstant(HalfTy, 0); - Builder.buildMerge(DstReg, { Zero, Narrowed }); - } else { - assert(MI.getOpcode() == TargetOpcode::G_ASHR); - auto Hi = Builder.buildAShr( - HalfTy, Unmerge.getReg(1), - Builder.buildConstant(HalfTy, HalfSize - 1)); - - if (ShiftVal == HalfSize) { - // (G_ASHR i64:x, 32) -> - // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31) - Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi }); - } else if (ShiftVal == Size - 1) { - // Don't need a second shift. 
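The cases above split a wide shift by a large constant into a half-width shift on one half plus a constant half. For the logical-shift-right case, the identity on a concrete 64-bit value (standalone sketch assuming Size = 64, HalfSize = 32):

#include <cassert>
#include <cstdint>

// dst = x >> C (logical), C >= 32:
//   lo(dst) = hi(x) >> (C - 32),  hi(dst) = 0
uint64_t lshr64ViaHalves(uint64_t X, unsigned C) {
  assert(C >= 32 && C < 64);
  uint32_t Hi = static_cast<uint32_t>(X >> 32); // G_UNMERGE_VALUES, high half
  uint32_t Lo = Hi >> (C - 32);                 // narrow G_LSHR
  return static_cast<uint64_t>(Lo);             // G_MERGE_VALUES (Lo, 0)
}
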
- // (G_ASHR i64:x, 63) -> - // %narrowed = (G_ASHR hi_32(x), 31) - // G_MERGE_VALUES %narrowed, %narrowed - Builder.buildMerge(DstReg, { Hi, Hi }); - } else { - auto Lo = Builder.buildAShr( - HalfTy, Unmerge.getReg(1), - Builder.buildConstant(HalfTy, ShiftVal - HalfSize)); - - // (G_ASHR i64:x, C) ->, for C >= 32 - // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31) - Builder.buildMerge(DstReg, { Lo, Hi }); - } - } - - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI, - unsigned TargetShiftAmount) { - unsigned ShiftAmt; - if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) { - applyCombineShiftToUnmerge(MI, ShiftAmt); - return true; - } - - return false; -} - + return ShiftVal >= Size / 2 && ShiftVal < Size; +} + +bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI, + const unsigned &ShiftVal) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT Ty = MRI.getType(SrcReg); + unsigned Size = Ty.getSizeInBits(); + unsigned HalfSize = Size / 2; + assert(ShiftVal >= HalfSize); + + LLT HalfTy = LLT::scalar(HalfSize); + + Builder.setInstr(MI); + auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg); + unsigned NarrowShiftAmt = ShiftVal - HalfSize; + + if (MI.getOpcode() == TargetOpcode::G_LSHR) { + Register Narrowed = Unmerge.getReg(1); + + // dst = G_LSHR s64:x, C for C >= 32 + // => + // lo, hi = G_UNMERGE_VALUES x + // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0 + + if (NarrowShiftAmt != 0) { + Narrowed = Builder.buildLShr(HalfTy, Narrowed, + Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); + } + + auto Zero = Builder.buildConstant(HalfTy, 0); + Builder.buildMerge(DstReg, { Narrowed, Zero }); + } else if (MI.getOpcode() == TargetOpcode::G_SHL) { + Register Narrowed = Unmerge.getReg(0); + // dst = G_SHL s64:x, C for C >= 32 + // => + // lo, hi = G_UNMERGE_VALUES x + // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32) + if (NarrowShiftAmt != 0) { + Narrowed = Builder.buildShl(HalfTy, Narrowed, + Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); + } + + auto Zero = Builder.buildConstant(HalfTy, 0); + Builder.buildMerge(DstReg, { Zero, Narrowed }); + } else { + assert(MI.getOpcode() == TargetOpcode::G_ASHR); + auto Hi = Builder.buildAShr( + HalfTy, Unmerge.getReg(1), + Builder.buildConstant(HalfTy, HalfSize - 1)); + + if (ShiftVal == HalfSize) { + // (G_ASHR i64:x, 32) -> + // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31) + Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi }); + } else if (ShiftVal == Size - 1) { + // Don't need a second shift. 
+ // (G_ASHR i64:x, 63) -> + // %narrowed = (G_ASHR hi_32(x), 31) + // G_MERGE_VALUES %narrowed, %narrowed + Builder.buildMerge(DstReg, { Hi, Hi }); + } else { + auto Lo = Builder.buildAShr( + HalfTy, Unmerge.getReg(1), + Builder.buildConstant(HalfTy, ShiftVal - HalfSize)); + + // (G_ASHR i64:x, C) ->, for C >= 32 + // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31) + Builder.buildMerge(DstReg, { Lo, Hi }); + } + } + + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI, + unsigned TargetShiftAmount) { + unsigned ShiftAmt; + if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) { + applyCombineShiftToUnmerge(MI, ShiftAmt); + return true; + } + + return false; +} + bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) { assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR"); Register DstReg = MI.getOperand(0).getReg(); @@ -2467,32 +2467,32 @@ bool CombinerHelper::applyCombineTruncOfShl( return true; } -bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) { - return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) { - return MO.isReg() && - getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); - }); -} - -bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) { - return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) { - return !MO.isReg() || - getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); - }); -} - -bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR); - ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); - return all_of(Mask, [](int Elt) { return Elt < 0; }); -} - -bool CombinerHelper::matchUndefStore(MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_STORE); - return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(), - MRI); -} - +bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) { + return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) { + return MO.isReg() && + getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); + }); +} + +bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) { + return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) { + return !MO.isReg() || + getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); + }); +} + +bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) { + assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR); + ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); + return all_of(Mask, [](int Elt) { return Elt < 0; }); +} + +bool CombinerHelper::matchUndefStore(MachineInstr &MI) { + assert(MI.getOpcode() == TargetOpcode::G_STORE); + return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(), + MRI); +} + bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) { assert(MI.getOpcode() == TargetOpcode::G_SELECT); return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(), @@ -2509,102 +2509,102 @@ bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) { return false; } -bool CombinerHelper::eraseInst(MachineInstr &MI) { - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1, - const MachineOperand &MOP2) { - if (!MOP1.isReg() || !MOP2.isReg()) - return false; - MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI); - if (!I1) - return false; - 
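tryCombineShiftToUnmerge above shows the usual CombinerHelper shape: a side-effect-free match phase that records what it found, then an apply phase that mutates. A generic sketch of that driver (hypothetical template, not the combiner framework itself):

// Match fills Info without touching the instruction; Apply rewrites using
// the recorded facts. Separating the phases keeps the failure path cheap
// and lets the same match logic be reused by other drivers.
template <typename Inst, typename Info>
bool tryCombineGeneric(Inst &I, bool (*Match)(Inst &, Info &),
                       void (*Apply)(Inst &, const Info &)) {
  Info MatchInfo{};
  if (!Match(I, MatchInfo))
    return false;
  Apply(I, MatchInfo);
  return true;
}
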
MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI); - if (!I2) - return false; - - // Handle a case like this: - // - // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>) - // - // Even though %0 and %1 are produced by the same instruction they are not - // the same values. - if (I1 == I2) - return MOP1.getReg() == MOP2.getReg(); - - // If we have an instruction which loads or stores, we can't guarantee that - // it is identical. - // - // For example, we may have - // - // %x1 = G_LOAD %addr (load N from @somewhere) - // ... - // call @foo - // ... - // %x2 = G_LOAD %addr (load N from @somewhere) - // ... - // %or = G_OR %x1, %x2 - // - // It's possible that @foo will modify whatever lives at the address we're - // loading from. To be safe, let's just assume that all loads and stores - // are different (unless we have something which is guaranteed to not - // change.) - if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr)) - return false; - - // Check for physical registers on the instructions first to avoid cases - // like this: - // - // %a = COPY $physreg - // ... - // SOMETHING implicit-def $physreg - // ... - // %b = COPY $physreg - // - // These copies are not equivalent. - if (any_of(I1->uses(), [](const MachineOperand &MO) { - return MO.isReg() && MO.getReg().isPhysical(); - })) { - // Check if we have a case like this: - // - // %a = COPY $physreg - // %b = COPY %a - // - // In this case, I1 and I2 will both be equal to %a = COPY $physreg. - // From that, we know that they must have the same value, since they must - // have come from the same COPY. - return I1->isIdenticalTo(*I2); - } - - // We don't have any physical registers, so we don't necessarily need the - // same vreg defs. - // - // On the off-chance that there's some target instruction feeding into the - // instruction, let's use produceSameValue instead of isIdenticalTo. - return Builder.getTII().produceSameValue(*I1, *I2, &MRI); -} - -bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) { - if (!MOP.isReg()) - return false; - // MIPatternMatch doesn't let us look through G_ZEXT etc. - auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI); - return ValAndVReg && ValAndVReg->Value == C; -} - -bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI, - unsigned OpIdx) { - assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); - Register OldReg = MI.getOperand(0).getReg(); - Register Replacement = MI.getOperand(OpIdx).getReg(); - assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); - MI.eraseFromParent(); - replaceRegWith(MRI, OldReg, Replacement); - return true; -} - +bool CombinerHelper::eraseInst(MachineInstr &MI) { + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1, + const MachineOperand &MOP2) { + if (!MOP1.isReg() || !MOP2.isReg()) + return false; + MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI); + if (!I1) + return false; + MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI); + if (!I2) + return false; + + // Handle a case like this: + // + // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>) + // + // Even though %0 and %1 are produced by the same instruction they are not + // the same values. + if (I1 == I2) + return MOP1.getReg() == MOP2.getReg(); + + // If we have an instruction which loads or stores, we can't guarantee that + // it is identical. 
+ // + // For example, we may have + // + // %x1 = G_LOAD %addr (load N from @somewhere) + // ... + // call @foo + // ... + // %x2 = G_LOAD %addr (load N from @somewhere) + // ... + // %or = G_OR %x1, %x2 + // + // It's possible that @foo will modify whatever lives at the address we're + // loading from. To be safe, let's just assume that all loads and stores + // are different (unless we have something which is guaranteed to not + // change.) + if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr)) + return false; + + // Check for physical registers on the instructions first to avoid cases + // like this: + // + // %a = COPY $physreg + // ... + // SOMETHING implicit-def $physreg + // ... + // %b = COPY $physreg + // + // These copies are not equivalent. + if (any_of(I1->uses(), [](const MachineOperand &MO) { + return MO.isReg() && MO.getReg().isPhysical(); + })) { + // Check if we have a case like this: + // + // %a = COPY $physreg + // %b = COPY %a + // + // In this case, I1 and I2 will both be equal to %a = COPY $physreg. + // From that, we know that they must have the same value, since they must + // have come from the same COPY. + return I1->isIdenticalTo(*I2); + } + + // We don't have any physical registers, so we don't necessarily need the + // same vreg defs. + // + // On the off-chance that there's some target instruction feeding into the + // instruction, let's use produceSameValue instead of isIdenticalTo. + return Builder.getTII().produceSameValue(*I1, *I2, &MRI); +} + +bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) { + if (!MOP.isReg()) + return false; + // MIPatternMatch doesn't let us look through G_ZEXT etc. + auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI); + return ValAndVReg && ValAndVReg->Value == C; +} + +bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI, + unsigned OpIdx) { + assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); + Register OldReg = MI.getOperand(0).getReg(); + Register Replacement = MI.getOperand(OpIdx).getReg(); + assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); + MI.eraseFromParent(); + replaceRegWith(MRI, OldReg, Replacement); + return true; +} + bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) { assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); @@ -2615,26 +2615,26 @@ bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI, return true; } -bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_SELECT); - // Match (cond ? x : x) - return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) && - canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(), - MRI); -} - -bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) { - return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) && - canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), - MRI); -} - -bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) { - return matchConstantOp(MI.getOperand(OpIdx), 0) && - canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(), - MRI); -} - +bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) { + assert(MI.getOpcode() == TargetOpcode::G_SELECT); + // Match (cond ? 
x : x) + return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) && + canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(), + MRI); +} + +bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) { + return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) && + canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), + MRI); +} + +bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) { + return matchConstantOp(MI.getOperand(OpIdx), 0) && + canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(), + MRI); +} + bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) { MachineOperand &MO = MI.getOperand(OpIdx); return MO.isReg() && @@ -2647,50 +2647,50 @@ bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB); } -bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) { - assert(MI.getNumDefs() == 1 && "Expected only one def?"); - Builder.setInstr(MI); - Builder.buildFConstant(MI.getOperand(0), C); - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) { - assert(MI.getNumDefs() == 1 && "Expected only one def?"); - Builder.setInstr(MI); - Builder.buildConstant(MI.getOperand(0), C); - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) { - assert(MI.getNumDefs() == 1 && "Expected only one def?"); - Builder.setInstr(MI); - Builder.buildUndef(MI.getOperand(0)); - MI.eraseFromParent(); - return true; -} - -bool CombinerHelper::matchSimplifyAddToSub( - MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { - Register LHS = MI.getOperand(1).getReg(); - Register RHS = MI.getOperand(2).getReg(); - Register &NewLHS = std::get<0>(MatchInfo); - Register &NewRHS = std::get<1>(MatchInfo); - - // Helper lambda to check for opportunities for - // ((0-A) + B) -> B - A - // (A + (0-B)) -> A - B - auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) { +bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) { + assert(MI.getNumDefs() == 1 && "Expected only one def?"); + Builder.setInstr(MI); + Builder.buildFConstant(MI.getOperand(0), C); + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) { + assert(MI.getNumDefs() == 1 && "Expected only one def?"); + Builder.setInstr(MI); + Builder.buildConstant(MI.getOperand(0), C); + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) { + assert(MI.getNumDefs() == 1 && "Expected only one def?"); + Builder.setInstr(MI); + Builder.buildUndef(MI.getOperand(0)); + MI.eraseFromParent(); + return true; +} + +bool CombinerHelper::matchSimplifyAddToSub( + MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { + Register LHS = MI.getOperand(1).getReg(); + Register RHS = MI.getOperand(2).getReg(); + Register &NewLHS = std::get<0>(MatchInfo); + Register &NewRHS = std::get<1>(MatchInfo); + + // Helper lambda to check for opportunities for + // ((0-A) + B) -> B - A + // (A + (0-B)) -> A - B + auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) { if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS)))) - return false; - NewLHS = MaybeNewLHS; - return true; - }; - - return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); -} - + return false; + NewLHS = MaybeNewLHS; + return true; + }; + + return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); +} + bool 
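matchSimplifyAddToSub above folds an add of a negation into a subtraction. The identity it relies on holds for all values under wrapping arithmetic (standalone check, not LLVM code):

#include <cassert>

// ((0 - A) + B) ==> B - A   and   (A + (0 - B)) ==> A - B
void checkAddToSub(unsigned A, unsigned B) {
  assert((0u - A) + B == B - A);
  assert(A + (0u - B) == A - B);
}
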
CombinerHelper::matchCombineInsertVecElts( MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) { assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT && @@ -2752,16 +2752,16 @@ bool CombinerHelper::applyCombineInsertVecElts( return true; } -bool CombinerHelper::applySimplifyAddToSub( - MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { - Builder.setInstr(MI); - Register SubLHS, SubRHS; - std::tie(SubLHS, SubRHS) = MatchInfo; - Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS); - MI.eraseFromParent(); - return true; -} - +bool CombinerHelper::applySimplifyAddToSub( + MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { + Builder.setInstr(MI); + Register SubLHS, SubRHS; + std::tie(SubLHS, SubRHS) = MatchInfo; + Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS); + MI.eraseFromParent(); + return true; +} + bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands( MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ... @@ -3568,12 +3568,12 @@ bool CombinerHelper::applyLoadOrCombine( return true; } -bool CombinerHelper::tryCombine(MachineInstr &MI) { - if (tryCombineCopy(MI)) - return true; - if (tryCombineExtendingLoads(MI)) - return true; - if (tryCombineIndexedLoadStore(MI)) - return true; - return false; -} +bool CombinerHelper::tryCombine(MachineInstr &MI) { + if (tryCombineCopy(MI)) + return true; + if (tryCombineExtendingLoads(MI)) + return true; + if (tryCombineIndexedLoadStore(MI)) + return true; + return false; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp index c8bbb6a3ed..59f4d60a41 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelChangeObserver.cpp @@ -1,48 +1,48 @@ -//===-- lib/CodeGen/GlobalISel/GISelChangeObserver.cpp --------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file constains common code to combine machine functions at generic -// level. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" - -using namespace llvm; - -void GISelChangeObserver::changingAllUsesOfReg( +//===-- lib/CodeGen/GlobalISel/GISelChangeObserver.cpp --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file constains common code to combine machine functions at generic +// level. 
+//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +using namespace llvm; + +void GISelChangeObserver::changingAllUsesOfReg( const MachineRegisterInfo &MRI, Register Reg) { - for (auto &ChangingMI : MRI.use_instructions(Reg)) { - changingInstr(ChangingMI); - ChangingAllUsesOfReg.insert(&ChangingMI); - } -} - -void GISelChangeObserver::finishedChangingAllUsesOfReg() { - for (auto *ChangedMI : ChangingAllUsesOfReg) - changedInstr(*ChangedMI); - ChangingAllUsesOfReg.clear(); -} - -RAIIDelegateInstaller::RAIIDelegateInstaller(MachineFunction &MF, - MachineFunction::Delegate *Del) - : MF(MF), Delegate(Del) { - // Register this as the delegate for handling insertions and deletions of - // instructions. - MF.setDelegate(Del); -} - -RAIIDelegateInstaller::~RAIIDelegateInstaller() { MF.resetDelegate(Delegate); } - -RAIIMFObserverInstaller::RAIIMFObserverInstaller(MachineFunction &MF, - GISelChangeObserver &Observer) - : MF(MF) { - MF.setObserver(&Observer); -} - -RAIIMFObserverInstaller::~RAIIMFObserverInstaller() { MF.setObserver(nullptr); } + for (auto &ChangingMI : MRI.use_instructions(Reg)) { + changingInstr(ChangingMI); + ChangingAllUsesOfReg.insert(&ChangingMI); + } +} + +void GISelChangeObserver::finishedChangingAllUsesOfReg() { + for (auto *ChangedMI : ChangingAllUsesOfReg) + changedInstr(*ChangedMI); + ChangingAllUsesOfReg.clear(); +} + +RAIIDelegateInstaller::RAIIDelegateInstaller(MachineFunction &MF, + MachineFunction::Delegate *Del) + : MF(MF), Delegate(Del) { + // Register this as the delegate for handling insertions and deletions of + // instructions. + MF.setDelegate(Del); +} + +RAIIDelegateInstaller::~RAIIDelegateInstaller() { MF.resetDelegate(Delegate); } + +RAIIMFObserverInstaller::RAIIMFObserverInstaller(MachineFunction &MF, + GISelChangeObserver &Observer) + : MF(MF) { + MF.setObserver(&Observer); +} + +RAIIMFObserverInstaller::~RAIIMFObserverInstaller() { MF.setObserver(nullptr); } diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelKnownBits.cpp index 084c9d6569..2de20489e1 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelKnownBits.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GISelKnownBits.cpp @@ -1,99 +1,99 @@ -//===- lib/CodeGen/GlobalISel/GISelKnownBits.cpp --------------*- C++ *-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -/// Provides analysis for querying information about KnownBits during GISel -/// passes. 
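RAIIDelegateInstaller and RAIIMFObserverInstaller above tie registration to scope so that no early return leaks a stale pointer into the MachineFunction. The pattern in miniature (generic sketch; Host stands for any type with a setObserver hook):

// Install on construction, uninstall on destruction; non-copyable so the
// uninstall runs exactly once.
template <typename Host, typename Observer>
class ScopedObserver {
  Host &H;

public:
  ScopedObserver(Host &H, Observer *O) : H(H) { H.setObserver(O); }
  ~ScopedObserver() { H.setObserver(nullptr); }
  ScopedObserver(const ScopedObserver &) = delete;
  ScopedObserver &operator=(const ScopedObserver &) = delete;
};
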
-// -//===------------------ -#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" -#include "llvm/Analysis/ValueTracking.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/CodeGen/TargetOpcodes.h" - -#define DEBUG_TYPE "gisel-known-bits" - -using namespace llvm; - -char llvm::GISelKnownBitsAnalysis::ID = 0; - -INITIALIZE_PASS(GISelKnownBitsAnalysis, DEBUG_TYPE, - "Analysis for ComputingKnownBits", false, true) - -GISelKnownBits::GISelKnownBits(MachineFunction &MF, unsigned MaxDepth) - : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()), - DL(MF.getFunction().getParent()->getDataLayout()), MaxDepth(MaxDepth) {} - -Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) { - const MachineInstr *MI = MRI.getVRegDef(R); - switch (MI->getOpcode()) { - case TargetOpcode::COPY: - return computeKnownAlignment(MI->getOperand(1).getReg(), Depth); - case TargetOpcode::G_FRAME_INDEX: { - int FrameIdx = MI->getOperand(1).getIndex(); - return MF.getFrameInfo().getObjectAlign(FrameIdx); - } - case TargetOpcode::G_INTRINSIC: - case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: - default: - return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1); - } -} - -KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) { - assert(MI.getNumExplicitDefs() == 1 && - "expected single return generic instruction"); - return getKnownBits(MI.getOperand(0).getReg()); -} - -KnownBits GISelKnownBits::getKnownBits(Register R) { - const LLT Ty = MRI.getType(R); - APInt DemandedElts = - Ty.isVector() ? APInt::getAllOnesValue(Ty.getNumElements()) : APInt(1, 1); - return getKnownBits(R, DemandedElts); -} - -KnownBits GISelKnownBits::getKnownBits(Register R, const APInt &DemandedElts, - unsigned Depth) { - // For now, we only maintain the cache during one request. - assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared"); - - KnownBits Known; - computeKnownBitsImpl(R, Known, DemandedElts); - ComputeKnownBitsCache.clear(); - return Known; -} - -bool GISelKnownBits::signBitIsZero(Register R) { - LLT Ty = MRI.getType(R); - unsigned BitWidth = Ty.getScalarSizeInBits(); - return maskedValueIsZero(R, APInt::getSignMask(BitWidth)); -} - -APInt GISelKnownBits::getKnownZeroes(Register R) { - return getKnownBits(R).Zero; -} - -APInt GISelKnownBits::getKnownOnes(Register R) { return getKnownBits(R).One; } - -LLVM_ATTRIBUTE_UNUSED static void -dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) { - dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth - << "] Computed for: " << MI << "[" << Depth << "] Known: 0x" - << (Known.Zero | Known.One).toString(16, false) << "\n" - << "[" << Depth << "] Zero: 0x" << Known.Zero.toString(16, false) - << "\n" - << "[" << Depth << "] One: 0x" << Known.One.toString(16, false) - << "\n"; -} - +//===- lib/CodeGen/GlobalISel/GISelKnownBits.cpp --------------*- C++ *-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// Provides analysis for querying information about KnownBits during GISel +/// passes. 
+// +//===------------------ +#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetOpcodes.h" + +#define DEBUG_TYPE "gisel-known-bits" + +using namespace llvm; + +char llvm::GISelKnownBitsAnalysis::ID = 0; + +INITIALIZE_PASS(GISelKnownBitsAnalysis, DEBUG_TYPE, + "Analysis for ComputingKnownBits", false, true) + +GISelKnownBits::GISelKnownBits(MachineFunction &MF, unsigned MaxDepth) + : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()), + DL(MF.getFunction().getParent()->getDataLayout()), MaxDepth(MaxDepth) {} + +Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) { + const MachineInstr *MI = MRI.getVRegDef(R); + switch (MI->getOpcode()) { + case TargetOpcode::COPY: + return computeKnownAlignment(MI->getOperand(1).getReg(), Depth); + case TargetOpcode::G_FRAME_INDEX: { + int FrameIdx = MI->getOperand(1).getIndex(); + return MF.getFrameInfo().getObjectAlign(FrameIdx); + } + case TargetOpcode::G_INTRINSIC: + case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: + default: + return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1); + } +} + +KnownBits GISelKnownBits::getKnownBits(MachineInstr &MI) { + assert(MI.getNumExplicitDefs() == 1 && + "expected single return generic instruction"); + return getKnownBits(MI.getOperand(0).getReg()); +} + +KnownBits GISelKnownBits::getKnownBits(Register R) { + const LLT Ty = MRI.getType(R); + APInt DemandedElts = + Ty.isVector() ? APInt::getAllOnesValue(Ty.getNumElements()) : APInt(1, 1); + return getKnownBits(R, DemandedElts); +} + +KnownBits GISelKnownBits::getKnownBits(Register R, const APInt &DemandedElts, + unsigned Depth) { + // For now, we only maintain the cache during one request. 
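A KnownBits result, as returned by getKnownBits above, is a pair of masks: bits proven zero and bits proven one, never overlapping (the hasConflict assertion later in this file checks exactly that). In miniature, at a fixed 64-bit width (sketch, not llvm::KnownBits):

#include <cstdint>

// Zero marks bits proven 0, One marks bits proven 1; a bit set in neither
// mask is unknown, and Zero & One must stay empty.
struct MiniKnownBits {
  uint64_t Zero = 0, One = 0;
  bool hasConflict() const { return (Zero & One) != 0; }
  bool isConstant() const { return (Zero | One) == ~0ull; }
  uint64_t getConstant() const { return One; } // only valid when isConstant()
};
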
+ assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared"); + + KnownBits Known; + computeKnownBitsImpl(R, Known, DemandedElts); + ComputeKnownBitsCache.clear(); + return Known; +} + +bool GISelKnownBits::signBitIsZero(Register R) { + LLT Ty = MRI.getType(R); + unsigned BitWidth = Ty.getScalarSizeInBits(); + return maskedValueIsZero(R, APInt::getSignMask(BitWidth)); +} + +APInt GISelKnownBits::getKnownZeroes(Register R) { + return getKnownBits(R).Zero; +} + +APInt GISelKnownBits::getKnownOnes(Register R) { return getKnownBits(R).One; } + +LLVM_ATTRIBUTE_UNUSED static void +dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) { + dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth + << "] Computed for: " << MI << "[" << Depth << "] Known: 0x" + << (Known.Zero | Known.One).toString(16, false) << "\n" + << "[" << Depth << "] Zero: 0x" << Known.Zero.toString(16, false) + << "\n" + << "[" << Depth << "] One: 0x" << Known.One.toString(16, false) + << "\n"; +} + /// Compute known bits for the intersection of \p Src0 and \p Src1 void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known, @@ -113,181 +113,181 @@ void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1, Known = KnownBits::commonBits(Known, Known2); } -void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, - const APInt &DemandedElts, - unsigned Depth) { - MachineInstr &MI = *MRI.getVRegDef(R); - unsigned Opcode = MI.getOpcode(); - LLT DstTy = MRI.getType(R); - - // Handle the case where this is called on a register that does not have a - // type constraint (i.e. it has a register class constraint instead). This is - // unlikely to occur except by looking through copies but it is possible for - // the initial register being queried to be in this state. - if (!DstTy.isValid()) { - Known = KnownBits(); - return; - } - - unsigned BitWidth = DstTy.getSizeInBits(); - auto CacheEntry = ComputeKnownBitsCache.find(R); - if (CacheEntry != ComputeKnownBitsCache.end()) { - Known = CacheEntry->second; - LLVM_DEBUG(dbgs() << "Cache hit at "); - LLVM_DEBUG(dumpResult(MI, Known, Depth)); - assert(Known.getBitWidth() == BitWidth && "Cache entry size doesn't match"); - return; - } - Known = KnownBits(BitWidth); // Don't know anything - - if (DstTy.isVector()) - return; // TODO: Handle vectors. - - // Depth may get bigger than max depth if it gets passed to a different - // GISelKnownBits object. - // This may happen when say a generic part uses a GISelKnownBits object - // with some max depth, but then we hit TL.computeKnownBitsForTargetInstr - // which creates a new GISelKnownBits object with a different and smaller - // depth. If we just check for equality, we would never exit if the depth - // that is passed down to the target specific GISelKnownBits object is - // already bigger than its max depth. - if (Depth >= getMaxDepth()) - return; - - if (!DemandedElts) - return; // No demanded elts, better to assume we don't know anything. - - KnownBits Known2; - - switch (Opcode) { - default: - TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI, - Depth); - break; - case TargetOpcode::COPY: - case TargetOpcode::G_PHI: - case TargetOpcode::PHI: { - Known.One = APInt::getAllOnesValue(BitWidth); - Known.Zero = APInt::getAllOnesValue(BitWidth); - // Destination registers should not have subregisters at this - // point of the pipeline, otherwise the main live-range will be - // defined more than once, which is against SSA. 
- assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?"); - // Record in the cache that we know nothing for MI. - // This will get updated later and in the meantime, if we reach that - // phi again, because of a loop, we will cut the search thanks to this - // cache entry. - // We could actually build up more information on the phi by not cutting - // the search, but that additional information is more a side effect - // than an intended choice. - // Therefore, for now, save on compile time until we derive a proper way - // to derive known bits for PHIs within loops. - ComputeKnownBitsCache[R] = KnownBits(BitWidth); - // PHI's operand are a mix of registers and basic blocks interleaved. - // We only care about the register ones. - for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) { - const MachineOperand &Src = MI.getOperand(Idx); - Register SrcReg = Src.getReg(); - // Look through trivial copies and phis but don't look through trivial - // copies or phis of the form `%1:(s32) = OP %0:gpr32`, known-bits - // analysis is currently unable to determine the bit width of a - // register class. - // - // We can't use NoSubRegister by name as it's defined by each target but - // it's always defined to be 0 by tablegen. - if (SrcReg.isVirtual() && Src.getSubReg() == 0 /*NoSubRegister*/ && - MRI.getType(SrcReg).isValid()) { - // For COPYs we don't do anything, don't increase the depth. - computeKnownBitsImpl(SrcReg, Known2, DemandedElts, - Depth + (Opcode != TargetOpcode::COPY)); +void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, + const APInt &DemandedElts, + unsigned Depth) { + MachineInstr &MI = *MRI.getVRegDef(R); + unsigned Opcode = MI.getOpcode(); + LLT DstTy = MRI.getType(R); + + // Handle the case where this is called on a register that does not have a + // type constraint (i.e. it has a register class constraint instead). This is + // unlikely to occur except by looking through copies but it is possible for + // the initial register being queried to be in this state. + if (!DstTy.isValid()) { + Known = KnownBits(); + return; + } + + unsigned BitWidth = DstTy.getSizeInBits(); + auto CacheEntry = ComputeKnownBitsCache.find(R); + if (CacheEntry != ComputeKnownBitsCache.end()) { + Known = CacheEntry->second; + LLVM_DEBUG(dbgs() << "Cache hit at "); + LLVM_DEBUG(dumpResult(MI, Known, Depth)); + assert(Known.getBitWidth() == BitWidth && "Cache entry size doesn't match"); + return; + } + Known = KnownBits(BitWidth); // Don't know anything + + if (DstTy.isVector()) + return; // TODO: Handle vectors. + + // Depth may get bigger than max depth if it gets passed to a different + // GISelKnownBits object. + // This may happen when say a generic part uses a GISelKnownBits object + // with some max depth, but then we hit TL.computeKnownBitsForTargetInstr + // which creates a new GISelKnownBits object with a different and smaller + // depth. If we just check for equality, we would never exit if the depth + // that is passed down to the target specific GISelKnownBits object is + // already bigger than its max depth. + if (Depth >= getMaxDepth()) + return; + + if (!DemandedElts) + return; // No demanded elts, better to assume we don't know anything. 
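computeKnownBitsMin and the PHI loop above merge facts from several sources with KnownBits::commonBits: a bit survives only if every path agrees on it. On raw masks (assumed stand-in for the real helper):

#include <cstdint>

// Intersection of two known-bits facts; the PHI case seeds "all bits known"
// and narrows with each incoming operand until nothing is left or the
// operands run out.
void commonBits(uint64_t &Zero, uint64_t &One, uint64_t Zero2, uint64_t One2) {
  Zero &= Zero2;
  One &= One2;
}
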
+ + KnownBits Known2; + + switch (Opcode) { + default: + TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI, + Depth); + break; + case TargetOpcode::COPY: + case TargetOpcode::G_PHI: + case TargetOpcode::PHI: { + Known.One = APInt::getAllOnesValue(BitWidth); + Known.Zero = APInt::getAllOnesValue(BitWidth); + // Destination registers should not have subregisters at this + // point of the pipeline, otherwise the main live-range will be + // defined more than once, which is against SSA. + assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?"); + // Record in the cache that we know nothing for MI. + // This will get updated later and in the meantime, if we reach that + // phi again, because of a loop, we will cut the search thanks to this + // cache entry. + // We could actually build up more information on the phi by not cutting + // the search, but that additional information is more a side effect + // than an intended choice. + // Therefore, for now, save on compile time until we derive a proper way + // to derive known bits for PHIs within loops. + ComputeKnownBitsCache[R] = KnownBits(BitWidth); + // PHI's operand are a mix of registers and basic blocks interleaved. + // We only care about the register ones. + for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) { + const MachineOperand &Src = MI.getOperand(Idx); + Register SrcReg = Src.getReg(); + // Look through trivial copies and phis but don't look through trivial + // copies or phis of the form `%1:(s32) = OP %0:gpr32`, known-bits + // analysis is currently unable to determine the bit width of a + // register class. + // + // We can't use NoSubRegister by name as it's defined by each target but + // it's always defined to be 0 by tablegen. + if (SrcReg.isVirtual() && Src.getSubReg() == 0 /*NoSubRegister*/ && + MRI.getType(SrcReg).isValid()) { + // For COPYs we don't do anything, don't increase the depth. + computeKnownBitsImpl(SrcReg, Known2, DemandedElts, + Depth + (Opcode != TargetOpcode::COPY)); Known = KnownBits::commonBits(Known, Known2); - // If we reach a point where we don't know anything - // just stop looking through the operands. - if (Known.One == 0 && Known.Zero == 0) - break; - } else { - // We know nothing. - Known = KnownBits(BitWidth); - break; - } - } - break; - } - case TargetOpcode::G_CONSTANT: { - auto CstVal = getConstantVRegVal(R, MRI); - if (!CstVal) - break; + // If we reach a point where we don't know anything + // just stop looking through the operands. + if (Known.One == 0 && Known.Zero == 0) + break; + } else { + // We know nothing. 
+ Known = KnownBits(BitWidth); + break; + } + } + break; + } + case TargetOpcode::G_CONSTANT: { + auto CstVal = getConstantVRegVal(R, MRI); + if (!CstVal) + break; Known = KnownBits::makeConstant(*CstVal); - break; - } - case TargetOpcode::G_FRAME_INDEX: { - int FrameIdx = MI.getOperand(1).getIndex(); - TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF); - break; - } - case TargetOpcode::G_SUB: { - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts, - Depth + 1); - Known = KnownBits::computeForAddSub(/*Add*/ false, /*NSW*/ false, Known, - Known2); - break; - } - case TargetOpcode::G_XOR: { - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, - Depth + 1); - - Known ^= Known2; - break; - } - case TargetOpcode::G_PTR_ADD: { - // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets? - LLT Ty = MRI.getType(MI.getOperand(1).getReg()); - if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace())) - break; - LLVM_FALLTHROUGH; - } - case TargetOpcode::G_ADD: { - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts, - Depth + 1); - Known = - KnownBits::computeForAddSub(/*Add*/ true, /*NSW*/ false, Known, Known2); - break; - } - case TargetOpcode::G_AND: { - // If either the LHS or the RHS are Zero, the result is zero. - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, - Depth + 1); - - Known &= Known2; - break; - } - case TargetOpcode::G_OR: { - // If either the LHS or the RHS are Zero, the result is zero. - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, - Depth + 1); - - Known |= Known2; - break; - } - case TargetOpcode::G_MUL: { - computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, - Depth + 1); - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, - Depth + 1); + break; + } + case TargetOpcode::G_FRAME_INDEX: { + int FrameIdx = MI.getOperand(1).getIndex(); + TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF); + break; + } + case TargetOpcode::G_SUB: { + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts, + Depth + 1); + Known = KnownBits::computeForAddSub(/*Add*/ false, /*NSW*/ false, Known, + Known2); + break; + } + case TargetOpcode::G_XOR: { + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, + Depth + 1); + + Known ^= Known2; + break; + } + case TargetOpcode::G_PTR_ADD: { + // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets? 
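The G_XOR case above combines the operand facts with operator^=. Written out on raw masks, this is the standard known-bits rule for exclusive or (sketch):

#include <cstdint>

// xor: known 0 where both inputs are known equal; known 1 where the inputs
// are known to differ; unknown otherwise.
void xorKnownBits(uint64_t &Zero, uint64_t &One, uint64_t Zero2,
                  uint64_t One2) {
  uint64_t NewZero = (Zero & Zero2) | (One & One2);
  uint64_t NewOne = (Zero & One2) | (One & Zero2);
  Zero = NewZero;
  One = NewOne;
}
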
+ LLT Ty = MRI.getType(MI.getOperand(1).getReg()); + if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace())) + break; + LLVM_FALLTHROUGH; + } + case TargetOpcode::G_ADD: { + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts, + Depth + 1); + Known = + KnownBits::computeForAddSub(/*Add*/ true, /*NSW*/ false, Known, Known2); + break; + } + case TargetOpcode::G_AND: { + // If either the LHS or the RHS are Zero, the result is zero. + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, + Depth + 1); + + Known &= Known2; + break; + } + case TargetOpcode::G_OR: { + // If either the LHS or the RHS are Zero, the result is zero. + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, + Depth + 1); + + Known |= Known2; + break; + } + case TargetOpcode::G_MUL: { + computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts, + Depth + 1); + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts, + Depth + 1); Known = KnownBits::computeForMul(Known, Known2); - break; - } - case TargetOpcode::G_SELECT: { + break; + } + case TargetOpcode::G_SELECT: { computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(), Known, DemandedElts, Depth + 1); break; @@ -296,12 +296,12 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, // TODO: Handle clamp pattern with number of sign bits KnownBits KnownRHS; computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, - Depth + 1); + Depth + 1); computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts, - Depth + 1); + Depth + 1); Known = KnownBits::smin(Known, KnownRHS); - break; - } + break; + } case TargetOpcode::G_SMAX: { // TODO: Handle clamp pattern with number of sign bits KnownBits KnownRHS; @@ -330,66 +330,66 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, Known = KnownBits::umax(Known, KnownRHS); break; } - case TargetOpcode::G_FCMP: - case TargetOpcode::G_ICMP: { - if (TL.getBooleanContents(DstTy.isVector(), - Opcode == TargetOpcode::G_FCMP) == - TargetLowering::ZeroOrOneBooleanContent && - BitWidth > 1) - Known.Zero.setBitsFrom(1); - break; - } - case TargetOpcode::G_SEXT: { - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, - Depth + 1); - // If the sign bit is known to be zero or one, then sext will extend - // it to the top bits, else it will just zext. - Known = Known.sext(BitWidth); - break; - } + case TargetOpcode::G_FCMP: + case TargetOpcode::G_ICMP: { + if (TL.getBooleanContents(DstTy.isVector(), + Opcode == TargetOpcode::G_FCMP) == + TargetLowering::ZeroOrOneBooleanContent && + BitWidth > 1) + Known.Zero.setBitsFrom(1); + break; + } + case TargetOpcode::G_SEXT: { + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, + Depth + 1); + // If the sign bit is known to be zero or one, then sext will extend + // it to the top bits, else it will just zext. 
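As the G_SEXT comment above says, sign extension pins the new top bits only when the source sign bit is already known. A concrete 8-to-16-bit instance of that mask update (sketch with hard-coded widths):

#include <cstdint>

// Sign bit (bit 7) known one: bits 8..15 become known one. Known zero:
// they become known zero. Unknown: the new bits stay unknown.
void sextKnown8to16(uint16_t &Zero, uint16_t &One) {
  if (One & 0x80)
    One |= 0xFF00;
  else if (Zero & 0x80)
    Zero |= 0xFF00;
}
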
+ Known = Known.sext(BitWidth); + break; + } case TargetOpcode::G_SEXT_INREG: { computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, Depth + 1); Known = Known.sextInReg(MI.getOperand(2).getImm()); break; } - case TargetOpcode::G_ANYEXT: { - computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, - Depth + 1); + case TargetOpcode::G_ANYEXT: { + computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts, + Depth + 1); Known = Known.anyext(BitWidth); - break; - } - case TargetOpcode::G_LOAD: { + break; + } + case TargetOpcode::G_LOAD: { const MachineMemOperand *MMO = *MI.memoperands_begin(); if (const MDNode *Ranges = MMO->getRanges()) { computeKnownBitsFromRangeMetadata(*Ranges, Known); - } + } - break; - } - case TargetOpcode::G_ZEXTLOAD: { - // Everything above the retrieved bits is zero + break; + } + case TargetOpcode::G_ZEXTLOAD: { + // Everything above the retrieved bits is zero Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits()); - break; - } + break; + } case TargetOpcode::G_ASHR: { KnownBits LHSKnown, RHSKnown; computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts, Depth + 1); - computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts, - Depth + 1); + computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts, + Depth + 1); Known = KnownBits::ashr(LHSKnown, RHSKnown); break; } case TargetOpcode::G_LSHR: { KnownBits LHSKnown, RHSKnown; computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts, - Depth + 1); + Depth + 1); computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts, Depth + 1); Known = KnownBits::lshr(LHSKnown, RHSKnown); - break; - } + break; + } case TargetOpcode::G_SHL: { KnownBits LHSKnown, RHSKnown; computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts, @@ -399,25 +399,25 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, Known = KnownBits::shl(LHSKnown, RHSKnown); break; } - case TargetOpcode::G_INTTOPTR: - case TargetOpcode::G_PTRTOINT: - // Fall through and handle them the same as zext/trunc. - LLVM_FALLTHROUGH; - case TargetOpcode::G_ZEXT: - case TargetOpcode::G_TRUNC: { - Register SrcReg = MI.getOperand(1).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - unsigned SrcBitWidth = SrcTy.isPointer() - ? DL.getIndexSizeInBits(SrcTy.getAddressSpace()) - : SrcTy.getSizeInBits(); - assert(SrcBitWidth && "SrcBitWidth can't be zero"); - Known = Known.zextOrTrunc(SrcBitWidth); - computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1); - Known = Known.zextOrTrunc(BitWidth); - if (BitWidth > SrcBitWidth) - Known.Zero.setBitsFrom(SrcBitWidth); - break; - } + case TargetOpcode::G_INTTOPTR: + case TargetOpcode::G_PTRTOINT: + // Fall through and handle them the same as zext/trunc. + LLVM_FALLTHROUGH; + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_TRUNC: { + Register SrcReg = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + unsigned SrcBitWidth = SrcTy.isPointer() + ? 
DL.getIndexSizeInBits(SrcTy.getAddressSpace()) + : SrcTy.getSizeInBits(); + assert(SrcBitWidth && "SrcBitWidth can't be zero"); + Known = Known.zextOrTrunc(SrcBitWidth); + computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1); + Known = Known.zextOrTrunc(BitWidth); + if (BitWidth > SrcBitWidth) + Known.Zero.setBitsFrom(SrcBitWidth); + break; + } case TargetOpcode::G_MERGE_VALUES: { unsigned NumOps = MI.getNumOperands(); unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); @@ -429,13 +429,13 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, Known.insertBits(SrcOpKnown, I * OpSize); } break; - } + } case TargetOpcode::G_UNMERGE_VALUES: { unsigned NumOps = MI.getNumOperands(); Register SrcReg = MI.getOperand(NumOps - 1).getReg(); if (MRI.getType(SrcReg).isVector()) return; // TODO: Handle vectors. - + KnownBits SrcOpKnown; computeKnownBitsImpl(SrcReg, SrcOpKnown, DemandedElts, Depth + 1); @@ -462,13 +462,13 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, } } - assert(!Known.hasConflict() && "Bits known to be one AND zero?"); - LLVM_DEBUG(dumpResult(MI, Known, Depth)); - - // Update the cache. - ComputeKnownBitsCache[R] = Known; -} - + assert(!Known.hasConflict() && "Bits known to be one AND zero?"); + LLVM_DEBUG(dumpResult(MI, Known, Depth)); + + // Update the cache. + ComputeKnownBitsCache[R] = Known; +} + /// Compute number of sign bits for the intersection of \p Src0 and \p Src1 unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1, const APInt &DemandedElts, @@ -480,49 +480,49 @@ unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1, return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits); } -unsigned GISelKnownBits::computeNumSignBits(Register R, - const APInt &DemandedElts, - unsigned Depth) { - MachineInstr &MI = *MRI.getVRegDef(R); - unsigned Opcode = MI.getOpcode(); - - if (Opcode == TargetOpcode::G_CONSTANT) - return MI.getOperand(1).getCImm()->getValue().getNumSignBits(); - - if (Depth == getMaxDepth()) - return 1; - - if (!DemandedElts) - return 1; // No demanded elts, better to assume we don't know anything. - - LLT DstTy = MRI.getType(R); - const unsigned TyBits = DstTy.getScalarSizeInBits(); - - // Handle the case where this is called on a register that does not have a - // type constraint. This is unlikely to occur except by looking through copies - // but it is possible for the initial register being queried to be in this - // state. - if (!DstTy.isValid()) - return 1; - - unsigned FirstAnswer = 1; - switch (Opcode) { - case TargetOpcode::COPY: { - MachineOperand &Src = MI.getOperand(1); - if (Src.getReg().isVirtual() && Src.getSubReg() == 0 && - MRI.getType(Src.getReg()).isValid()) { - // Don't increment Depth for this one since we didn't do any work. 
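computeNumSignBitsMin above takes the minimum over its sources because "N copies of the sign bit" must hold on every path. For reference, the quantity itself, computed from scratch on an 8-bit value (illustration only, not the analysis code):

#include <cstdint>

// numSignBits(x) = number of identical leading bits, counting the sign bit
// itself: 0b11111010 -> 5, 0b00000001 -> 7, 0b01111111 -> 1.
unsigned numSignBits8(int8_t V) {
  uint8_t U = static_cast<uint8_t>(V);
  unsigned SignBit = (U >> 7) & 1;
  unsigned N = 1;
  for (int Bit = 6; Bit >= 0; --Bit, ++N)
    if (((U >> Bit) & 1) != SignBit)
      break;
  return N;
}
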
- return computeNumSignBits(Src.getReg(), DemandedElts, Depth); - } - - return 1; - } - case TargetOpcode::G_SEXT: { - Register Src = MI.getOperand(1).getReg(); - LLT SrcTy = MRI.getType(Src); - unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits(); - return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp; - } +unsigned GISelKnownBits::computeNumSignBits(Register R, + const APInt &DemandedElts, + unsigned Depth) { + MachineInstr &MI = *MRI.getVRegDef(R); + unsigned Opcode = MI.getOpcode(); + + if (Opcode == TargetOpcode::G_CONSTANT) + return MI.getOperand(1).getCImm()->getValue().getNumSignBits(); + + if (Depth == getMaxDepth()) + return 1; + + if (!DemandedElts) + return 1; // No demanded elts, better to assume we don't know anything. + + LLT DstTy = MRI.getType(R); + const unsigned TyBits = DstTy.getScalarSizeInBits(); + + // Handle the case where this is called on a register that does not have a + // type constraint. This is unlikely to occur except by looking through copies + // but it is possible for the initial register being queried to be in this + // state. + if (!DstTy.isValid()) + return 1; + + unsigned FirstAnswer = 1; + switch (Opcode) { + case TargetOpcode::COPY: { + MachineOperand &Src = MI.getOperand(1); + if (Src.getReg().isVirtual() && Src.getSubReg() == 0 && + MRI.getType(Src.getReg()).isValid()) { + // Don't increment Depth for this one since we didn't do any work. + return computeNumSignBits(Src.getReg(), DemandedElts, Depth); + } + + return 1; + } + case TargetOpcode::G_SEXT: { + Register Src = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(Src); + unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits(); + return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp; + } case TargetOpcode::G_SEXT_INREG: { // Max of the input and what this extends. Register Src = MI.getOperand(1).getReg(); @@ -530,7 +530,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned InRegBits = TyBits - SrcBits + 1; return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1), InRegBits); } - case TargetOpcode::G_SEXTLOAD: { + case TargetOpcode::G_SEXTLOAD: { // FIXME: We need an in-memory type representation. if (DstTy.isVector()) return 1; @@ -538,7 +538,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R, // e.g. i16->i32 = '17' bits known. const MachineMemOperand *MMO = *MI.memoperands_begin(); return TyBits - MMO->getSizeInBits() + 1; - } + } case TargetOpcode::G_ZEXTLOAD: { // FIXME: We need an in-memory type representation. if (DstTy.isVector()) @@ -548,66 +548,66 @@ unsigned GISelKnownBits::computeNumSignBits(Register R, const MachineMemOperand *MMO = *MI.memoperands_begin(); return TyBits - MMO->getSizeInBits(); } - case TargetOpcode::G_TRUNC: { - Register Src = MI.getOperand(1).getReg(); - LLT SrcTy = MRI.getType(Src); - - // Check if the sign bits of source go down as far as the truncated value. - unsigned DstTyBits = DstTy.getScalarSizeInBits(); - unsigned NumSrcBits = SrcTy.getScalarSizeInBits(); - unsigned NumSrcSignBits = computeNumSignBits(Src, DemandedElts, Depth + 1); - if (NumSrcSignBits > (NumSrcBits - DstTyBits)) - return NumSrcSignBits - (NumSrcBits - DstTyBits); - break; - } + case TargetOpcode::G_TRUNC: { + Register Src = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(Src); + + // Check if the sign bits of source go down as far as the truncated value. 
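The extension and load cases above reduce to arithmetic on bit counts; a few worked instances of those formulas (the i16 -> i32 sextload value 17 is the example from the comment in the code):

// G_SEXT     s8  -> s32: numSignBits(dst) = numSignBits(src) + (32 - 8)
// G_SEXTLOAD m16 -> s32: numSignBits(dst) = 32 - 16 + 1 = 17
// G_ZEXTLOAD m16 -> s32: numSignBits(dst) = 32 - 16     = 16 (top bits zero)
// G_TRUNC    s64 -> s32: if numSignBits(src) = 40 > 64 - 32, then
//                        numSignBits(dst) = 40 - (64 - 32) = 8
unsigned sextLoadSignBits(unsigned TyBits, unsigned MemBits) {
  return TyBits - MemBits + 1; // e.g. 32 - 16 + 1 == 17
}
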
+ unsigned DstTyBits = DstTy.getScalarSizeInBits(); + unsigned NumSrcBits = SrcTy.getScalarSizeInBits(); + unsigned NumSrcSignBits = computeNumSignBits(Src, DemandedElts, Depth + 1); + if (NumSrcSignBits > (NumSrcBits - DstTyBits)) + return NumSrcSignBits - (NumSrcBits - DstTyBits); + break; + } case TargetOpcode::G_SELECT: { return computeNumSignBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(), DemandedElts, Depth + 1); } - case TargetOpcode::G_INTRINSIC: - case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: - default: { - unsigned NumBits = - TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth); - if (NumBits > 1) - FirstAnswer = std::max(FirstAnswer, NumBits); - break; - } - } - - // Finally, if we can prove that the top bits of the result are 0's or 1's, - // use this information. - KnownBits Known = getKnownBits(R, DemandedElts, Depth); - APInt Mask; - if (Known.isNonNegative()) { // sign bit is 0 - Mask = Known.Zero; - } else if (Known.isNegative()) { // sign bit is 1; - Mask = Known.One; - } else { - // Nothing known. - return FirstAnswer; - } - - // Okay, we know that the sign bit in Mask is set. Use CLO to determine - // the number of identical bits in the top of the input value. - Mask <<= Mask.getBitWidth() - TyBits; - return std::max(FirstAnswer, Mask.countLeadingOnes()); -} - -unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned Depth) { - LLT Ty = MRI.getType(R); - APInt DemandedElts = Ty.isVector() - ? APInt::getAllOnesValue(Ty.getNumElements()) - : APInt(1, 1); - return computeNumSignBits(R, DemandedElts, Depth); -} - -void GISelKnownBitsAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesAll(); - MachineFunctionPass::getAnalysisUsage(AU); -} - -bool GISelKnownBitsAnalysis::runOnMachineFunction(MachineFunction &MF) { - return false; -} + case TargetOpcode::G_INTRINSIC: + case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: + default: { + unsigned NumBits = + TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth); + if (NumBits > 1) + FirstAnswer = std::max(FirstAnswer, NumBits); + break; + } + } + + // Finally, if we can prove that the top bits of the result are 0's or 1's, + // use this information. + KnownBits Known = getKnownBits(R, DemandedElts, Depth); + APInt Mask; + if (Known.isNonNegative()) { // sign bit is 0 + Mask = Known.Zero; + } else if (Known.isNegative()) { // sign bit is 1; + Mask = Known.One; + } else { + // Nothing known. + return FirstAnswer; + } + + // Okay, we know that the sign bit in Mask is set. Use CLO to determine + // the number of identical bits in the top of the input value. + Mask <<= Mask.getBitWidth() - TyBits; + return std::max(FirstAnswer, Mask.countLeadingOnes()); +} + +unsigned GISelKnownBits::computeNumSignBits(Register R, unsigned Depth) { + LLT Ty = MRI.getType(R); + APInt DemandedElts = Ty.isVector() + ? 
APInt::getAllOnesValue(Ty.getNumElements()) + : APInt(1, 1); + return computeNumSignBits(R, DemandedElts, Depth); +} + +void GISelKnownBitsAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +bool GISelKnownBitsAnalysis::runOnMachineFunction(MachineFunction &MF) { + return false; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GlobalISel.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GlobalISel.cpp index ec4acc9150..e0391e6f64 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GlobalISel.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/GlobalISel.cpp @@ -1,24 +1,24 @@ -//===-- llvm/CodeGen/GlobalISel/GlobalIsel.cpp --- GlobalISel ----*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -// This file implements the common initialization routines for the -// GlobalISel library. -//===----------------------------------------------------------------------===// - -#include "llvm/InitializePasses.h" -#include "llvm/PassRegistry.h" - -using namespace llvm; - -void llvm::initializeGlobalISel(PassRegistry &Registry) { - initializeIRTranslatorPass(Registry); - initializeLegalizerPass(Registry); - initializeLocalizerPass(Registry); - initializeRegBankSelectPass(Registry); - initializeInstructionSelectPass(Registry); -} +//===-- llvm/CodeGen/GlobalISel/GlobalIsel.cpp --- GlobalISel ----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +// This file implements the common initialization routines for the +// GlobalISel library. +//===----------------------------------------------------------------------===// + +#include "llvm/InitializePasses.h" +#include "llvm/PassRegistry.h" + +using namespace llvm; + +void llvm::initializeGlobalISel(PassRegistry &Registry) { + initializeIRTranslatorPass(Registry); + initializeLegalizerPass(Registry); + initializeLocalizerPass(Registry); + initializeRegBankSelectPass(Registry); + initializeInstructionSelectPass(Registry); +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/IRTranslator.cpp index 16e8996471..b7883cbc31 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1,376 +1,376 @@ -//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the IRTranslator class. 
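The one-line computeNumSignBits wrapper above picks the default demanded-elements mask before dispatching to the main overload. A minimal standalone sketch of that choice, assuming only LLVM's APInt (the helper name is invented):

#include "llvm/ADT/APInt.h"

// Scalars are modelled as a single demanded lane; vectors demand every lane.
llvm::APInt allDemanded(bool IsVector, unsigned NumElts) {
  return IsVector ? llvm::APInt::getAllOnesValue(NumElts)
                  : llvm::APInt(1, 1);
}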
-//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/IRTranslator.h" -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/ScopeExit.h" -#include "llvm/ADT/SmallSet.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Analysis/BranchProbabilityInfo.h" -#include "llvm/Analysis/Loads.h" -#include "llvm/Analysis/OptimizationRemarkEmitter.h" -#include "llvm/Analysis/ValueTracking.h" -#include "llvm/CodeGen/Analysis.h" -#include "llvm/CodeGen/GlobalISel/CallLowering.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" -#include "llvm/CodeGen/LowLevelType.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineMemOperand.h" +//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the IRTranslator class. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/IRTranslator.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/ScopeExit.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/BranchProbabilityInfo.h" +#include "llvm/Analysis/Loads.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/CodeGen/Analysis.h" +#include "llvm/CodeGen/GlobalISel/CallLowering.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" +#include "llvm/CodeGen/LowLevelType.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineModuleInfo.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/StackProtector.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/StackProtector.h" #include "llvm/CodeGen/SwitchLoweringUtils.h" -#include "llvm/CodeGen/TargetFrameLowering.h" -#include "llvm/CodeGen/TargetInstrInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/CodeGen/TargetRegisterInfo.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/CFG.h" -#include "llvm/IR/Constant.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/IR/DebugInfo.h" -#include "llvm/IR/DerivedTypes.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/GetElementPtrTypeIterator.h" -#include "llvm/IR/InlineAsm.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include 
"llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GetElementPtrTypeIterator.h" +#include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstrTypes.h" -#include "llvm/IR/Instructions.h" -#include "llvm/IR/IntrinsicInst.h" -#include "llvm/IR/Intrinsics.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Metadata.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Metadata.h" #include "llvm/IR/PatternMatch.h" -#include "llvm/IR/Type.h" -#include "llvm/IR/User.h" -#include "llvm/IR/Value.h" -#include "llvm/InitializePasses.h" -#include "llvm/MC/MCContext.h" -#include "llvm/Pass.h" -#include "llvm/Support/Casting.h" -#include "llvm/Support/CodeGen.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/LowLevelTypeImpl.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetIntrinsicInfo.h" -#include "llvm/Target/TargetMachine.h" -#include <algorithm> -#include <cassert> +#include "llvm/IR/Type.h" +#include "llvm/IR/User.h" +#include "llvm/IR/Value.h" +#include "llvm/InitializePasses.h" +#include "llvm/MC/MCContext.h" +#include "llvm/Pass.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CodeGen.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LowLevelTypeImpl.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetIntrinsicInfo.h" +#include "llvm/Target/TargetMachine.h" +#include <algorithm> +#include <cassert> #include <cstddef> -#include <cstdint> -#include <iterator> -#include <string> -#include <utility> -#include <vector> - -#define DEBUG_TYPE "irtranslator" - -using namespace llvm; - -static cl::opt<bool> - EnableCSEInIRTranslator("enable-cse-in-irtranslator", - cl::desc("Should enable CSE in irtranslator"), - cl::Optional, cl::init(false)); -char IRTranslator::ID = 0; - -INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", - false, false) -INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) -INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) +#include <cstdint> +#include <iterator> +#include <string> +#include <utility> +#include <vector> + +#define DEBUG_TYPE "irtranslator" + +using namespace llvm; + +static cl::opt<bool> + EnableCSEInIRTranslator("enable-cse-in-irtranslator", + cl::desc("Should enable CSE in irtranslator"), + cl::Optional, cl::init(false)); +char IRTranslator::ID = 0; + +INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", + false, false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(StackProtector) -INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", - false, false) - -static void reportTranslationError(MachineFunction &MF, - const TargetPassConfig &TPC, - OptimizationRemarkEmitter &ORE, - OptimizationRemarkMissed &R) { - MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); - - // Print the function name explicitly if we don't have a debug location (which - // makes the 
diagnostic less useful) or if we're going to emit a raw error. - if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) - R << (" (in function: " + MF.getName() + ")").str(); - - if (TPC.isGlobalISelAbortEnabled()) - report_fatal_error(R.getMsg()); - else - ORE.emit(R); -} - +INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", + false, false) + +static void reportTranslationError(MachineFunction &MF, + const TargetPassConfig &TPC, + OptimizationRemarkEmitter &ORE, + OptimizationRemarkMissed &R) { + MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); + + // Print the function name explicitly if we don't have a debug location (which + // makes the diagnostic less useful) or if we're going to emit a raw error. + if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) + R << (" (in function: " + MF.getName() + ")").str(); + + if (TPC.isGlobalISelAbortEnabled()) + report_fatal_error(R.getMsg()); + else + ORE.emit(R); +} + IRTranslator::IRTranslator(CodeGenOpt::Level optlevel) : MachineFunctionPass(ID), OptLevel(optlevel) {} - -#ifndef NDEBUG -namespace { -/// Verify that every instruction created has the same DILocation as the -/// instruction being translated. -class DILocationVerifier : public GISelChangeObserver { - const Instruction *CurrInst = nullptr; - -public: - DILocationVerifier() = default; - ~DILocationVerifier() = default; - - const Instruction *getCurrentInst() const { return CurrInst; } - void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } - - void erasingInstr(MachineInstr &MI) override {} - void changingInstr(MachineInstr &MI) override {} - void changedInstr(MachineInstr &MI) override {} - - void createdInstr(MachineInstr &MI) override { - assert(getCurrentInst() && "Inserted instruction without a current MI"); - - // Only print the check message if we're actually checking it. -#ifndef NDEBUG - LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst - << " was copied to " << MI); -#endif - // We allow insts in the entry block to have a debug loc line of 0 because - // they could have originated from constants, and we don't want a jumpy - // debug experience. - assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || - MI.getDebugLoc().getLine() == 0) && - "Line info was not transferred to all instructions"); - } -}; -} // namespace -#endif // ifndef NDEBUG - - -void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<StackProtector>(); - AU.addRequired<TargetPassConfig>(); - AU.addRequired<GISelCSEAnalysisWrapperPass>(); + +#ifndef NDEBUG +namespace { +/// Verify that every instruction created has the same DILocation as the +/// instruction being translated. +class DILocationVerifier : public GISelChangeObserver { + const Instruction *CurrInst = nullptr; + +public: + DILocationVerifier() = default; + ~DILocationVerifier() = default; + + const Instruction *getCurrentInst() const { return CurrInst; } + void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } + + void erasingInstr(MachineInstr &MI) override {} + void changingInstr(MachineInstr &MI) override {} + void changedInstr(MachineInstr &MI) override {} + + void createdInstr(MachineInstr &MI) override { + assert(getCurrentInst() && "Inserted instruction without a current MI"); + + // Only print the check message if we're actually checking it. 
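The invariant DILocationVerifier enforces, restated as a standalone predicate (illustrative only; the line-0 exception exists because constants hoisted into the entry block would otherwise produce a jumpy debug experience):

#include "llvm/IR/DebugLoc.h"

// A translated MachineInstr must keep the IR instruction's DILocation,
// except that instructions in the entry block may carry line 0.
bool lineInfoPreserved(const llvm::DebugLoc &IRLoc, const llvm::DebugLoc &MILoc) {
  return MILoc == IRLoc || MILoc.getLine() == 0;
}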
+#ifndef NDEBUG + LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst + << " was copied to " << MI); +#endif + // We allow insts in the entry block to have a debug loc line of 0 because + // they could have originated from constants, and we don't want a jumpy + // debug experience. + assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || + MI.getDebugLoc().getLine() == 0) && + "Line info was not transferred to all instructions"); + } +}; +} // namespace +#endif // ifndef NDEBUG + + +void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<StackProtector>(); + AU.addRequired<TargetPassConfig>(); + AU.addRequired<GISelCSEAnalysisWrapperPass>(); if (OptLevel != CodeGenOpt::None) AU.addRequired<BranchProbabilityInfoWrapperPass>(); - getSelectionDAGFallbackAnalysisUsage(AU); - MachineFunctionPass::getAnalysisUsage(AU); -} - -IRTranslator::ValueToVRegInfo::VRegListT & -IRTranslator::allocateVRegs(const Value &Val) { + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +IRTranslator::ValueToVRegInfo::VRegListT & +IRTranslator::allocateVRegs(const Value &Val) { auto VRegsIt = VMap.findVRegs(Val); if (VRegsIt != VMap.vregs_end()) return *VRegsIt->second; - auto *Regs = VMap.getVRegs(Val); - auto *Offsets = VMap.getOffsets(Val); - SmallVector<LLT, 4> SplitTys; - computeValueLLTs(*DL, *Val.getType(), SplitTys, - Offsets->empty() ? Offsets : nullptr); - for (unsigned i = 0; i < SplitTys.size(); ++i) - Regs->push_back(0); - return *Regs; -} - -ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { - auto VRegsIt = VMap.findVRegs(Val); - if (VRegsIt != VMap.vregs_end()) - return *VRegsIt->second; - - if (Val.getType()->isVoidTy()) - return *VMap.getVRegs(Val); - - // Create entry for this type. - auto *VRegs = VMap.getVRegs(Val); - auto *Offsets = VMap.getOffsets(Val); - - assert(Val.getType()->isSized() && - "Don't know how to create an empty vreg"); - - SmallVector<LLT, 4> SplitTys; - computeValueLLTs(*DL, *Val.getType(), SplitTys, - Offsets->empty() ? Offsets : nullptr); - - if (!isa<Constant>(Val)) { - for (auto Ty : SplitTys) - VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); - return *VRegs; - } - - if (Val.getType()->isAggregateType()) { - // UndefValue, ConstantAggregateZero - auto &C = cast<Constant>(Val); - unsigned Idx = 0; - while (auto Elt = C.getAggregateElement(Idx++)) { - auto EltRegs = getOrCreateVRegs(*Elt); - llvm::copy(EltRegs, std::back_inserter(*VRegs)); - } - } else { - assert(SplitTys.size() == 1 && "unexpectedly split LLT"); - VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); - bool Success = translate(cast<Constant>(Val), VRegs->front()); - if (!Success) { - OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", - MF->getFunction().getSubprogram(), - &MF->getFunction().getEntryBlock()); - R << "unable to translate constant: " << ore::NV("Type", Val.getType()); - reportTranslationError(*MF, *TPC, *ORE, R); - return *VRegs; - } - } - - return *VRegs; -} - -int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { + auto *Regs = VMap.getVRegs(Val); + auto *Offsets = VMap.getOffsets(Val); + SmallVector<LLT, 4> SplitTys; + computeValueLLTs(*DL, *Val.getType(), SplitTys, + Offsets->empty() ? 
Offsets : nullptr); + for (unsigned i = 0; i < SplitTys.size(); ++i) + Regs->push_back(0); + return *Regs; +} + +ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { + auto VRegsIt = VMap.findVRegs(Val); + if (VRegsIt != VMap.vregs_end()) + return *VRegsIt->second; + + if (Val.getType()->isVoidTy()) + return *VMap.getVRegs(Val); + + // Create entry for this type. + auto *VRegs = VMap.getVRegs(Val); + auto *Offsets = VMap.getOffsets(Val); + + assert(Val.getType()->isSized() && + "Don't know how to create an empty vreg"); + + SmallVector<LLT, 4> SplitTys; + computeValueLLTs(*DL, *Val.getType(), SplitTys, + Offsets->empty() ? Offsets : nullptr); + + if (!isa<Constant>(Val)) { + for (auto Ty : SplitTys) + VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); + return *VRegs; + } + + if (Val.getType()->isAggregateType()) { + // UndefValue, ConstantAggregateZero + auto &C = cast<Constant>(Val); + unsigned Idx = 0; + while (auto Elt = C.getAggregateElement(Idx++)) { + auto EltRegs = getOrCreateVRegs(*Elt); + llvm::copy(EltRegs, std::back_inserter(*VRegs)); + } + } else { + assert(SplitTys.size() == 1 && "unexpectedly split LLT"); + VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); + bool Success = translate(cast<Constant>(Val), VRegs->front()); + if (!Success) { + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + MF->getFunction().getSubprogram(), + &MF->getFunction().getEntryBlock()); + R << "unable to translate constant: " << ore::NV("Type", Val.getType()); + reportTranslationError(*MF, *TPC, *ORE, R); + return *VRegs; + } + } + + return *VRegs; +} + +int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { auto MapEntry = FrameIndices.find(&AI); if (MapEntry != FrameIndices.end()) return MapEntry->second; - - uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); - uint64_t Size = - ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); - - // Always allocate at least one byte. - Size = std::max<uint64_t>(Size, 1u); - - int &FI = FrameIndices[&AI]; - FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI); - return FI; -} - -Align IRTranslator::getMemOpAlign(const Instruction &I) { - if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) - return SI->getAlign(); - if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { - return LI->getAlign(); - } - if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { - // TODO(PR27168): This instruction has no alignment attribute, but unlike - // the default alignment for load/store, the default here is to assume - // it has NATURAL alignment, not DataLayout-specified alignment. - const DataLayout &DL = AI->getModule()->getDataLayout(); - return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType())); - } - if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { - // TODO(PR27168): This instruction has no alignment attribute, but unlike - // the default alignment for load/store, the default here is to assume - // it has NATURAL alignment, not DataLayout-specified alignment. 
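The VMap bookkeeping above keeps one virtual register per split piece plus a parallel offset list. A hypothetical example (not from the patch): with a typical 64-bit DataLayout, a value of type { i32, i64 } splits into two scalar LLTs, and the offsets are recorded in bits; translateLoad and translateStore later divide them by 8 to get byte offsets.

#include <cstdint>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/LowLevelTypeImpl.h"

// Hypothetical result of computeValueLLTs for { i32, i64 } under a 64-bit DL:
// piece 0 at bit offset 0, piece 1 at bit offset 64 (the i64 is 8-byte aligned).
void splitExample(llvm::SmallVectorImpl<llvm::LLT> &SplitTys,
                  llvm::SmallVectorImpl<uint64_t> &Offsets) {
  SplitTys.push_back(llvm::LLT::scalar(32)); Offsets.push_back(0);
  SplitTys.push_back(llvm::LLT::scalar(64)); Offsets.push_back(64);
}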
- const DataLayout &DL = AI->getModule()->getDataLayout(); - return Align(DL.getTypeStoreSize(AI->getValOperand()->getType())); - } - OptimizationRemarkMissed R("gisel-irtranslator", "", &I); - R << "unable to translate memop: " << ore::NV("Opcode", &I); - reportTranslationError(*MF, *TPC, *ORE, R); - return Align(1); -} - -MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { - MachineBasicBlock *&MBB = BBToMBB[&BB]; - assert(MBB && "BasicBlock was not encountered before"); - return *MBB; -} - -void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { - assert(NewPred && "new predecessor must be a real MachineBasicBlock"); - MachinePreds[Edge].push_back(NewPred); -} - -bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, - MachineIRBuilder &MIRBuilder) { - // Get or create a virtual register for each value. - // Unless the value is a Constant => loadimm cst? - // or inline constant each time? - // Creation of a virtual register needs to have a size. - Register Op0 = getOrCreateVReg(*U.getOperand(0)); - Register Op1 = getOrCreateVReg(*U.getOperand(1)); - Register Res = getOrCreateVReg(U); - uint16_t Flags = 0; - if (isa<Instruction>(U)) { - const Instruction &I = cast<Instruction>(U); - Flags = MachineInstr::copyFlagsFromInstruction(I); - } - - MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); - return true; -} - + + uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); + uint64_t Size = + ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); + + // Always allocate at least one byte. + Size = std::max<uint64_t>(Size, 1u); + + int &FI = FrameIndices[&AI]; + FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI); + return FI; +} + +Align IRTranslator::getMemOpAlign(const Instruction &I) { + if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) + return SI->getAlign(); + if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { + return LI->getAlign(); + } + if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { + // TODO(PR27168): This instruction has no alignment attribute, but unlike + // the default alignment for load/store, the default here is to assume + // it has NATURAL alignment, not DataLayout-specified alignment. + const DataLayout &DL = AI->getModule()->getDataLayout(); + return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType())); + } + if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { + // TODO(PR27168): This instruction has no alignment attribute, but unlike + // the default alignment for load/store, the default here is to assume + // it has NATURAL alignment, not DataLayout-specified alignment. + const DataLayout &DL = AI->getModule()->getDataLayout(); + return Align(DL.getTypeStoreSize(AI->getValOperand()->getType())); + } + OptimizationRemarkMissed R("gisel-irtranslator", "", &I); + R << "unable to translate memop: " << ore::NV("Opcode", &I); + reportTranslationError(*MF, *TPC, *ORE, R); + return Align(1); +} + +MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { + MachineBasicBlock *&MBB = BBToMBB[&BB]; + assert(MBB && "BasicBlock was not encountered before"); + return *MBB; +} + +void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { + assert(NewPred && "new predecessor must be a real MachineBasicBlock"); + MachinePreds[Edge].push_back(NewPred); +} + +bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, + MachineIRBuilder &MIRBuilder) { + // Get or create a virtual register for each value. 
+ // Unless the value is a Constant => loadimm cst? + // or inline constant each time? + // Creation of a virtual register needs to have a size. + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Op1 = getOrCreateVReg(*U.getOperand(1)); + Register Res = getOrCreateVReg(U); + uint16_t Flags = 0; + if (isa<Instruction>(U)) { + const Instruction &I = cast<Instruction>(U); + Flags = MachineInstr::copyFlagsFromInstruction(I); + } + + MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); + return true; +} + bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U, MachineIRBuilder &MIRBuilder) { - Register Op0 = getOrCreateVReg(*U.getOperand(0)); - Register Res = getOrCreateVReg(U); - uint16_t Flags = 0; - if (isa<Instruction>(U)) { - const Instruction &I = cast<Instruction>(U); - Flags = MachineInstr::copyFlagsFromInstruction(I); - } + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Res = getOrCreateVReg(U); + uint16_t Flags = 0; + if (isa<Instruction>(U)) { + const Instruction &I = cast<Instruction>(U); + Flags = MachineInstr::copyFlagsFromInstruction(I); + } MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags); - return true; -} - + return true; +} + bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder); } -bool IRTranslator::translateCompare(const User &U, - MachineIRBuilder &MIRBuilder) { - auto *CI = dyn_cast<CmpInst>(&U); - Register Op0 = getOrCreateVReg(*U.getOperand(0)); - Register Op1 = getOrCreateVReg(*U.getOperand(1)); - Register Res = getOrCreateVReg(U); - CmpInst::Predicate Pred = - CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>( - cast<ConstantExpr>(U).getPredicate()); - if (CmpInst::isIntPredicate(Pred)) - MIRBuilder.buildICmp(Pred, Res, Op0, Op1); - else if (Pred == CmpInst::FCMP_FALSE) - MIRBuilder.buildCopy( - Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); - else if (Pred == CmpInst::FCMP_TRUE) - MIRBuilder.buildCopy( - Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); - else { - assert(CI && "Instruction should be CmpInst"); - MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, - MachineInstr::copyFlagsFromInstruction(*CI)); - } - - return true; -} - -bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { - const ReturnInst &RI = cast<ReturnInst>(U); - const Value *Ret = RI.getReturnValue(); - if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) - Ret = nullptr; - - ArrayRef<Register> VRegs; - if (Ret) - VRegs = getOrCreateVRegs(*Ret); - - Register SwiftErrorVReg = 0; - if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { - SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( - &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); - } - - // The target may mess up with the insertion point, but - // this is not important as a return is the last instruction - // of the block anyway. +bool IRTranslator::translateCompare(const User &U, + MachineIRBuilder &MIRBuilder) { + auto *CI = dyn_cast<CmpInst>(&U); + Register Op0 = getOrCreateVReg(*U.getOperand(0)); + Register Op1 = getOrCreateVReg(*U.getOperand(1)); + Register Res = getOrCreateVReg(U); + CmpInst::Predicate Pred = + CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( + cast<ConstantExpr>(U).getPredicate()); + if (CmpInst::isIntPredicate(Pred)) + MIRBuilder.buildICmp(Pred, Res, Op0, Op1); + else if (Pred == CmpInst::FCMP_FALSE) + MIRBuilder.buildCopy( + Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); + else if (Pred == CmpInst::FCMP_TRUE) + MIRBuilder.buildCopy( + Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); + else { + assert(CI && "Instruction should be CmpInst"); + MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, + MachineInstr::copyFlagsFromInstruction(*CI)); + } + + return true; +} + +bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { + const ReturnInst &RI = cast<ReturnInst>(U); + const Value *Ret = RI.getReturnValue(); + if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) + Ret = nullptr; + + ArrayRef<Register> VRegs; + if (Ret) + VRegs = getOrCreateVRegs(*Ret); + + Register SwiftErrorVReg = 0; + if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { + SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( + &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); + } + + // The target may mess up with the insertion point, but + // this is not important as a return is the last instruction + // of the block anyway. return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg); -} - +} + void IRTranslator::emitBranchForMergedCondition( const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, @@ -565,8 +565,8 @@ bool IRTranslator::shouldEmitAsBranches( return true; } -bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { - const BranchInst &BrInst = cast<BranchInst>(U); +bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { + const BranchInst &BrInst = cast<BranchInst>(U); auto &CurMBB = MIRBuilder.getMBB(); auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0)); @@ -579,15 +579,15 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { for (const BasicBlock *Succ : successors(&BrInst)) CurMBB.addSuccessor(&getMBB(*Succ)); return true; - } - + } + // If this condition is one of the special cases we handle, do special stuff // now. const Value *CondVal = BrInst.getCondition(); MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1)); - + const auto &TLI = *MF->getSubtarget().getTargetLowering(); - + // If this is a series of conditions that are or'd or and'd together, emit // this as a sequence of branches instead of setcc's with and/or operations. // As long as jumps are not expensive (exceptions for multi-use logic ops, @@ -651,191 +651,191 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { // Use emitSwitchCase to actually insert the fast branch sequence for this // cond branch. 
emitSwitchCase(CB, &CurMBB, *CurBuilder); - return true; -} - -void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, - MachineBasicBlock *Dst, - BranchProbability Prob) { - if (!FuncInfo.BPI) { - Src->addSuccessorWithoutProb(Dst); - return; - } - if (Prob.isUnknown()) - Prob = getEdgeProbability(Src, Dst); - Src->addSuccessor(Dst, Prob); -} - -BranchProbability -IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, - const MachineBasicBlock *Dst) const { - const BasicBlock *SrcBB = Src->getBasicBlock(); - const BasicBlock *DstBB = Dst->getBasicBlock(); - if (!FuncInfo.BPI) { - // If BPI is not available, set the default probability as 1 / N, where N is - // the number of successors. - auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); - return BranchProbability(1, SuccSize); - } - return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); -} - -bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { - using namespace SwitchCG; - // Extract cases from the switch. - const SwitchInst &SI = cast<SwitchInst>(U); - BranchProbabilityInfo *BPI = FuncInfo.BPI; - CaseClusterVector Clusters; - Clusters.reserve(SI.getNumCases()); - for (auto &I : SI.cases()) { - MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); - assert(Succ && "Could not find successor mbb in mapping"); - const ConstantInt *CaseVal = I.getCaseValue(); - BranchProbability Prob = - BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) - : BranchProbability(1, SI.getNumCases() + 1); - Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); - } - - MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); - - // Cluster adjacent cases with the same destination. We do this at all - // optimization levels because it's cheap to do and will make codegen faster - // if there are many clusters. - sortAndRangeify(Clusters); - - MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); - - // If there is only the default destination, jump there directly. - if (Clusters.empty()) { - SwitchMBB->addSuccessor(DefaultMBB); - if (DefaultMBB != SwitchMBB->getNextNode()) - MIB.buildBr(*DefaultMBB); - return true; - } - - SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); + return true; +} + +void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, + MachineBasicBlock *Dst, + BranchProbability Prob) { + if (!FuncInfo.BPI) { + Src->addSuccessorWithoutProb(Dst); + return; + } + if (Prob.isUnknown()) + Prob = getEdgeProbability(Src, Dst); + Src->addSuccessor(Dst, Prob); +} + +BranchProbability +IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, + const MachineBasicBlock *Dst) const { + const BasicBlock *SrcBB = Src->getBasicBlock(); + const BasicBlock *DstBB = Dst->getBasicBlock(); + if (!FuncInfo.BPI) { + // If BPI is not available, set the default probability as 1 / N, where N is + // the number of successors. + auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); + return BranchProbability(1, SuccSize); + } + return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); +} + +bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { + using namespace SwitchCG; + // Extract cases from the switch. 
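When no BranchProbabilityInfo is available, the probability helpers above fall back to uniform splits. A standalone sketch of the two defaults (illustrative only; helper names are invented):

#include <algorithm>
#include "llvm/Support/BranchProbability.h"

// An edge out of a block with N successors defaults to 1/N.
llvm::BranchProbability uniformEdgeProb(unsigned NumSuccessors) {
  return llvm::BranchProbability(1, std::max(NumSuccessors, 1u));
}

// Each switch case defaults to 1/(NumCases + 1); the extra slot is the
// default destination.
llvm::BranchProbability uniformCaseProb(unsigned NumCases) {
  return llvm::BranchProbability(1, NumCases + 1);
}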
+ const SwitchInst &SI = cast<SwitchInst>(U); + BranchProbabilityInfo *BPI = FuncInfo.BPI; + CaseClusterVector Clusters; + Clusters.reserve(SI.getNumCases()); + for (auto &I : SI.cases()) { + MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); + assert(Succ && "Could not find successor mbb in mapping"); + const ConstantInt *CaseVal = I.getCaseValue(); + BranchProbability Prob = + BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) + : BranchProbability(1, SI.getNumCases() + 1); + Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); + } + + MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); + + // Cluster adjacent cases with the same destination. We do this at all + // optimization levels because it's cheap to do and will make codegen faster + // if there are many clusters. + sortAndRangeify(Clusters); + + MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); + + // If there is only the default destination, jump there directly. + if (Clusters.empty()) { + SwitchMBB->addSuccessor(DefaultMBB); + if (DefaultMBB != SwitchMBB->getNextNode()) + MIB.buildBr(*DefaultMBB); + return true; + } + + SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); SL->findBitTestClusters(Clusters, &SI); - - LLVM_DEBUG({ - dbgs() << "Case clusters: "; - for (const CaseCluster &C : Clusters) { - if (C.Kind == CC_JumpTable) - dbgs() << "JT:"; - if (C.Kind == CC_BitTests) - dbgs() << "BT:"; - - C.Low->getValue().print(dbgs(), true); - if (C.Low != C.High) { - dbgs() << '-'; - C.High->getValue().print(dbgs(), true); - } - dbgs() << ' '; - } - dbgs() << '\n'; - }); - - assert(!Clusters.empty()); - SwitchWorkList WorkList; - CaseClusterIt First = Clusters.begin(); - CaseClusterIt Last = Clusters.end() - 1; - auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); - WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); - - // FIXME: At the moment we don't do any splitting optimizations here like - // SelectionDAG does, so this worklist only has one entry. - while (!WorkList.empty()) { - SwitchWorkListItem W = WorkList.back(); - WorkList.pop_back(); - if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) - return false; - } - return true; -} - -void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, - MachineBasicBlock *MBB) { - // Emit the code for the jump table - assert(JT.Reg != -1U && "Should lower JT Header first!"); - MachineIRBuilder MIB(*MBB->getParent()); - MIB.setMBB(*MBB); - MIB.setDebugLoc(CurBuilder->getDebugLoc()); - - Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); - const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); - - auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); - MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); -} - -bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, - SwitchCG::JumpTableHeader &JTH, - MachineBasicBlock *HeaderBB) { - MachineIRBuilder MIB(*HeaderBB->getParent()); - MIB.setMBB(*HeaderBB); - MIB.setDebugLoc(CurBuilder->getDebugLoc()); - - const Value &SValue = *JTH.SValue; - // Subtract the lowest switch case value from the value being switched on. - const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); - Register SwitchOpReg = getOrCreateVReg(SValue); - auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); - auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); - - // This value may be smaller or larger than the target's pointer type, and - // therefore require extension or truncating. 
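emitJumpTableHeader subtracts the lowest case value (the buildSub above) and then emits a single unsigned compare against Last - First (below). That one compare implements the whole range check, because values below First wrap around to huge unsigned numbers; a standalone illustration:

#include <cstdint>

// True when V lies outside [First, Last] and must branch to the default
// block: V < First wraps to a value greater than (Last - First).
bool escapesJumpTable(uint64_t V, uint64_t First, uint64_t Last) {
  return (V - First) > (Last - First);
}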
- Type *PtrIRTy = SValue.getType()->getPointerTo(); - const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); - Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); - - JT.Reg = Sub.getReg(0); - - if (JTH.OmitRangeCheck) { - if (JT.MBB != HeaderBB->getNextNode()) - MIB.buildBr(*JT.MBB); - return true; - } - - // Emit the range check for the jump table, and branch to the default block - // for the switch statement if the value being switched on exceeds the - // largest case in the switch. - auto Cst = getOrCreateVReg( - *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); - Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); - auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); - - auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); - - // Avoid emitting unnecessary branches to the next block. - if (JT.MBB != HeaderBB->getNextNode()) - BrCond = MIB.buildBr(*JT.MBB); - return true; -} - -void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, - MachineBasicBlock *SwitchBB, - MachineIRBuilder &MIB) { - Register CondLHS = getOrCreateVReg(*CB.CmpLHS); - Register Cond; - DebugLoc OldDbgLoc = MIB.getDebugLoc(); - MIB.setDebugLoc(CB.DbgLoc); - MIB.setMBB(*CB.ThisBB); - - if (CB.PredInfo.NoCmp) { - // Branch or fall through to TrueBB. - addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); - addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, - CB.ThisBB); - CB.ThisBB->normalizeSuccProbs(); - if (CB.TrueBB != CB.ThisBB->getNextNode()) - MIB.buildBr(*CB.TrueBB); - MIB.setDebugLoc(OldDbgLoc); - return; - } - - const LLT i1Ty = LLT::scalar(1); - // Build the compare. - if (!CB.CmpMHS) { + + LLVM_DEBUG({ + dbgs() << "Case clusters: "; + for (const CaseCluster &C : Clusters) { + if (C.Kind == CC_JumpTable) + dbgs() << "JT:"; + if (C.Kind == CC_BitTests) + dbgs() << "BT:"; + + C.Low->getValue().print(dbgs(), true); + if (C.Low != C.High) { + dbgs() << '-'; + C.High->getValue().print(dbgs(), true); + } + dbgs() << ' '; + } + dbgs() << '\n'; + }); + + assert(!Clusters.empty()); + SwitchWorkList WorkList; + CaseClusterIt First = Clusters.begin(); + CaseClusterIt Last = Clusters.end() - 1; + auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); + WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); + + // FIXME: At the moment we don't do any splitting optimizations here like + // SelectionDAG does, so this worklist only has one entry. 
+ while (!WorkList.empty()) { + SwitchWorkListItem W = WorkList.back(); + WorkList.pop_back(); + if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) + return false; + } + return true; +} + +void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, + MachineBasicBlock *MBB) { + // Emit the code for the jump table + assert(JT.Reg != -1U && "Should lower JT Header first!"); + MachineIRBuilder MIB(*MBB->getParent()); + MIB.setMBB(*MBB); + MIB.setDebugLoc(CurBuilder->getDebugLoc()); + + Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); + const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); + + auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); + MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); +} + +bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, + SwitchCG::JumpTableHeader &JTH, + MachineBasicBlock *HeaderBB) { + MachineIRBuilder MIB(*HeaderBB->getParent()); + MIB.setMBB(*HeaderBB); + MIB.setDebugLoc(CurBuilder->getDebugLoc()); + + const Value &SValue = *JTH.SValue; + // Subtract the lowest switch case value from the value being switched on. + const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); + Register SwitchOpReg = getOrCreateVReg(SValue); + auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); + auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); + + // This value may be smaller or larger than the target's pointer type, and + // therefore require extension or truncating. + Type *PtrIRTy = SValue.getType()->getPointerTo(); + const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); + Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); + + JT.Reg = Sub.getReg(0); + + if (JTH.OmitRangeCheck) { + if (JT.MBB != HeaderBB->getNextNode()) + MIB.buildBr(*JT.MBB); + return true; + } + + // Emit the range check for the jump table, and branch to the default block + // for the switch statement if the value being switched on exceeds the + // largest case in the switch. + auto Cst = getOrCreateVReg( + *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); + Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); + auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); + + auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); + + // Avoid emitting unnecessary branches to the next block. + if (JT.MBB != HeaderBB->getNextNode()) + BrCond = MIB.buildBr(*JT.MBB); + return true; +} + +void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, + MachineBasicBlock *SwitchBB, + MachineIRBuilder &MIB) { + Register CondLHS = getOrCreateVReg(*CB.CmpLHS); + Register Cond; + DebugLoc OldDbgLoc = MIB.getDebugLoc(); + MIB.setDebugLoc(CB.DbgLoc); + MIB.setMBB(*CB.ThisBB); + + if (CB.PredInfo.NoCmp) { + // Branch or fall through to TrueBB. + addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, + CB.ThisBB); + CB.ThisBB->normalizeSuccProbs(); + if (CB.TrueBB != CB.ThisBB->getNextNode()) + MIB.buildBr(*CB.TrueBB); + MIB.setDebugLoc(OldDbgLoc); + return; + } + + const LLT i1Ty = LLT::scalar(1); + // Build the compare. + if (!CB.CmpMHS) { const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS); // For conditional branch lowering, we might try to do something silly like // emit an G_ICMP to compare an existing G_ICMP i1 result with true. 
If so, @@ -852,152 +852,152 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); } - } else { - assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && - "Can only handle SLE ranges"); - - const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); - const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); - - Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); - if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { - Register CondRHS = getOrCreateVReg(*CB.CmpRHS); - Cond = - MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); - } else { - const LLT CmpTy = MRI->getType(CmpOpReg); - auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); - auto Diff = MIB.buildConstant(CmpTy, High - Low); - Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); - } - } - - // Update successor info - addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); - - addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, - CB.ThisBB); - - // TrueBB and FalseBB are always different unless the incoming IR is - // degenerate. This only happens when running llc on weird IR. - if (CB.TrueBB != CB.FalseBB) - addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb); - CB.ThisBB->normalizeSuccProbs(); - + } else { + assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && + "Can only handle SLE ranges"); + + const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); + const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); + + Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); + if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { + Register CondRHS = getOrCreateVReg(*CB.CmpRHS); + Cond = + MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); + } else { + const LLT CmpTy = MRI->getType(CmpOpReg); + auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); + auto Diff = MIB.buildConstant(CmpTy, High - Low); + Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); + } + } + + // Update successor info + addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); + + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, + CB.ThisBB); + + // TrueBB and FalseBB are always different unless the incoming IR is + // degenerate. This only happens when running llc on weird IR. + if (CB.TrueBB != CB.FalseBB) + addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb); + CB.ThisBB->normalizeSuccProbs(); + addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()}, CB.ThisBB); - - MIB.buildBrCond(Cond, *CB.TrueBB); - MIB.buildBr(*CB.FalseBB); - MIB.setDebugLoc(OldDbgLoc); -} - -bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W, - MachineBasicBlock *SwitchMBB, - MachineBasicBlock *CurMBB, - MachineBasicBlock *DefaultMBB, - MachineIRBuilder &MIB, - MachineFunction::iterator BBI, - BranchProbability UnhandledProbs, - SwitchCG::CaseClusterIt I, - MachineBasicBlock *Fallthrough, - bool FallthroughUnreachable) { - using namespace SwitchCG; - MachineFunction *CurMF = SwitchMBB->getParent(); - // FIXME: Optimize away range check based on pivot comparisons. - JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; - SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; - BranchProbability DefaultProb = W.DefaultProb; - - // The jump block hasn't been inserted yet; insert it here. 
- MachineBasicBlock *JumpMBB = JT->MBB; - CurMF->insert(BBI, JumpMBB); - - // Since the jump table block is separate from the switch block, we need - // to keep track of it as a machine predecessor to the default block, - // otherwise we lose the phi edges. - addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, - CurMBB); - addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, - JumpMBB); - - auto JumpProb = I->Prob; - auto FallthroughProb = UnhandledProbs; - - // If the default statement is a target of the jump table, we evenly - // distribute the default probability to successors of CurMBB. Also - // update the probability on the edge from JumpMBB to Fallthrough. - for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), - SE = JumpMBB->succ_end(); - SI != SE; ++SI) { - if (*SI == DefaultMBB) { - JumpProb += DefaultProb / 2; - FallthroughProb -= DefaultProb / 2; - JumpMBB->setSuccProbability(SI, DefaultProb / 2); - JumpMBB->normalizeSuccProbs(); - } else { - // Also record edges from the jump table block to it's successors. - addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()}, - JumpMBB); - } - } - - // Skip the range check if the fallthrough block is unreachable. - if (FallthroughUnreachable) - JTH->OmitRangeCheck = true; - - if (!JTH->OmitRangeCheck) - addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb); - addSuccessorWithProb(CurMBB, JumpMBB, JumpProb); - CurMBB->normalizeSuccProbs(); - - // The jump table header will be inserted in our current block, do the - // range check, and fall through to our fallthrough block. - JTH->HeaderBB = CurMBB; - JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. - - // If we're in the right place, emit the jump table header right now. - if (CurMBB == SwitchMBB) { - if (!emitJumpTableHeader(*JT, *JTH, CurMBB)) - return false; - JTH->Emitted = true; - } - return true; -} -bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, - Value *Cond, - MachineBasicBlock *Fallthrough, - bool FallthroughUnreachable, - BranchProbability UnhandledProbs, - MachineBasicBlock *CurMBB, - MachineIRBuilder &MIB, - MachineBasicBlock *SwitchMBB) { - using namespace SwitchCG; - const Value *RHS, *LHS, *MHS; - CmpInst::Predicate Pred; - if (I->Low == I->High) { - // Check Cond == I->Low. - Pred = CmpInst::ICMP_EQ; - LHS = Cond; - RHS = I->Low; - MHS = nullptr; - } else { - // Check I->Low <= Cond <= I->High. - Pred = CmpInst::ICMP_SLE; - LHS = I->Low; - MHS = Cond; - RHS = I->High; - } - - // If Fallthrough is unreachable, fold away the comparison. - // The false probability is the sum of all unhandled cases. - CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, - CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); - - emitSwitchCase(CB, SwitchMBB, MIB); - return true; -} - + + MIB.buildBrCond(Cond, *CB.TrueBB); + MIB.buildBr(*CB.FalseBB); + MIB.setDebugLoc(OldDbgLoc); +} + +bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W, + MachineBasicBlock *SwitchMBB, + MachineBasicBlock *CurMBB, + MachineBasicBlock *DefaultMBB, + MachineIRBuilder &MIB, + MachineFunction::iterator BBI, + BranchProbability UnhandledProbs, + SwitchCG::CaseClusterIt I, + MachineBasicBlock *Fallthrough, + bool FallthroughUnreachable) { + using namespace SwitchCG; + MachineFunction *CurMF = SwitchMBB->getParent(); + // FIXME: Optimize away range check based on pivot comparisons. 
+ JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; + SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; + BranchProbability DefaultProb = W.DefaultProb; + + // The jump block hasn't been inserted yet; insert it here. + MachineBasicBlock *JumpMBB = JT->MBB; + CurMF->insert(BBI, JumpMBB); + + // Since the jump table block is separate from the switch block, we need + // to keep track of it as a machine predecessor to the default block, + // otherwise we lose the phi edges. + addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, + CurMBB); + addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, + JumpMBB); + + auto JumpProb = I->Prob; + auto FallthroughProb = UnhandledProbs; + + // If the default statement is a target of the jump table, we evenly + // distribute the default probability to successors of CurMBB. Also + // update the probability on the edge from JumpMBB to Fallthrough. + for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), + SE = JumpMBB->succ_end(); + SI != SE; ++SI) { + if (*SI == DefaultMBB) { + JumpProb += DefaultProb / 2; + FallthroughProb -= DefaultProb / 2; + JumpMBB->setSuccProbability(SI, DefaultProb / 2); + JumpMBB->normalizeSuccProbs(); + } else { + // Also record edges from the jump table block to it's successors. + addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()}, + JumpMBB); + } + } + + // Skip the range check if the fallthrough block is unreachable. + if (FallthroughUnreachable) + JTH->OmitRangeCheck = true; + + if (!JTH->OmitRangeCheck) + addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb); + addSuccessorWithProb(CurMBB, JumpMBB, JumpProb); + CurMBB->normalizeSuccProbs(); + + // The jump table header will be inserted in our current block, do the + // range check, and fall through to our fallthrough block. + JTH->HeaderBB = CurMBB; + JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. + + // If we're in the right place, emit the jump table header right now. + if (CurMBB == SwitchMBB) { + if (!emitJumpTableHeader(*JT, *JTH, CurMBB)) + return false; + JTH->Emitted = true; + } + return true; +} +bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, + Value *Cond, + MachineBasicBlock *Fallthrough, + bool FallthroughUnreachable, + BranchProbability UnhandledProbs, + MachineBasicBlock *CurMBB, + MachineIRBuilder &MIB, + MachineBasicBlock *SwitchMBB) { + using namespace SwitchCG; + const Value *RHS, *LHS, *MHS; + CmpInst::Predicate Pred; + if (I->Low == I->High) { + // Check Cond == I->Low. + Pred = CmpInst::ICMP_EQ; + LHS = Cond; + RHS = I->Low; + MHS = nullptr; + } else { + // Check I->Low <= Cond <= I->High. + Pred = CmpInst::ICMP_SLE; + LHS = I->Low; + MHS = Cond; + RHS = I->High; + } + + // If Fallthrough is unreachable, fold away the comparison. + // The false probability is the sum of all unhandled cases. 
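A worked example (numbers invented) of the probability redistribution in lowerJumpTableWorkItem above: when the default destination is also reachable through the jump table, half of the default probability moves onto the jump edge and comes off the fallthrough edge.

#include "llvm/Support/BranchProbability.h"

// Hypothetical figures: jump table 60%, default/unhandled 20%.
void redistributeExample() {
  llvm::BranchProbability JumpProb(3, 5);        // 60%
  llvm::BranchProbability DefaultProb(1, 5);     // 20%
  llvm::BranchProbability FallthroughProb = DefaultProb;
  JumpProb += DefaultProb / 2;                   // now 70%
  FallthroughProb -= DefaultProb / 2;            // now 10%
  (void)JumpProb; (void)FallthroughProb;
}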
+ CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, + CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); + + emitSwitchCase(CB, SwitchMBB, MIB); + return true; +} + void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB) { MachineIRBuilder &MIB = *CurBuilder; @@ -1148,66 +1148,66 @@ bool IRTranslator::lowerBitTestWorkItem( return true; } -bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, - Value *Cond, - MachineBasicBlock *SwitchMBB, - MachineBasicBlock *DefaultMBB, - MachineIRBuilder &MIB) { - using namespace SwitchCG; - MachineFunction *CurMF = FuncInfo.MF; - MachineBasicBlock *NextMBB = nullptr; - MachineFunction::iterator BBI(W.MBB); - if (++BBI != FuncInfo.MF->end()) - NextMBB = &*BBI; - - if (EnableOpts) { - // Here, we order cases by probability so the most likely case will be - // checked first. However, two clusters can have the same probability in - // which case their relative ordering is non-deterministic. So we use Low - // as a tie-breaker as clusters are guaranteed to never overlap. - llvm::sort(W.FirstCluster, W.LastCluster + 1, - [](const CaseCluster &a, const CaseCluster &b) { - return a.Prob != b.Prob - ? a.Prob > b.Prob - : a.Low->getValue().slt(b.Low->getValue()); - }); - - // Rearrange the case blocks so that the last one falls through if possible - // without changing the order of probabilities. - for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { - --I; - if (I->Prob > W.LastCluster->Prob) - break; - if (I->Kind == CC_Range && I->MBB == NextMBB) { - std::swap(*I, *W.LastCluster); - break; - } - } - } - - // Compute total probability. - BranchProbability DefaultProb = W.DefaultProb; - BranchProbability UnhandledProbs = DefaultProb; - for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) - UnhandledProbs += I->Prob; - - MachineBasicBlock *CurMBB = W.MBB; - for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { - bool FallthroughUnreachable = false; - MachineBasicBlock *Fallthrough; - if (I == W.LastCluster) { - // For the last cluster, fall through to the default destination. - Fallthrough = DefaultMBB; - FallthroughUnreachable = isa<UnreachableInst>( - DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); - } else { - Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); - CurMF->insert(BBI, Fallthrough); - } - UnhandledProbs -= I->Prob; - - switch (I->Kind) { - case CC_BitTests: { +bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, + Value *Cond, + MachineBasicBlock *SwitchMBB, + MachineBasicBlock *DefaultMBB, + MachineIRBuilder &MIB) { + using namespace SwitchCG; + MachineFunction *CurMF = FuncInfo.MF; + MachineBasicBlock *NextMBB = nullptr; + MachineFunction::iterator BBI(W.MBB); + if (++BBI != FuncInfo.MF->end()) + NextMBB = &*BBI; + + if (EnableOpts) { + // Here, we order cases by probability so the most likely case will be + // checked first. However, two clusters can have the same probability in + // which case their relative ordering is non-deterministic. So we use Low + // as a tie-breaker as clusters are guaranteed to never overlap. + llvm::sort(W.FirstCluster, W.LastCluster + 1, + [](const CaseCluster &a, const CaseCluster &b) { + return a.Prob != b.Prob + ? a.Prob > b.Prob + : a.Low->getValue().slt(b.Low->getValue()); + }); + + // Rearrange the case blocks so that the last one falls through if possible + // without changing the order of probabilities. 
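The llvm::sort in lowerSwitchWorkItem above orders clusters so the hottest case is tested first, with the cluster's lower bound as a deterministic tie-breaker. A standalone sketch with a hypothetical stand-in for SwitchCG::CaseCluster:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for SwitchCG::CaseCluster, ordering fields only.
struct ClusterInfo { double Prob; int64_t Low; };

// Most probable first; ties fall back to the lower bound, which is unique
// because clusters never overlap.
void orderClusters(std::vector<ClusterInfo> &Cs) {
  std::sort(Cs.begin(), Cs.end(),
            [](const ClusterInfo &A, const ClusterInfo &B) {
              return A.Prob != B.Prob ? A.Prob > B.Prob : A.Low < B.Low;
            });
}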
+ for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { + --I; + if (I->Prob > W.LastCluster->Prob) + break; + if (I->Kind == CC_Range && I->MBB == NextMBB) { + std::swap(*I, *W.LastCluster); + break; + } + } + } + + // Compute total probability. + BranchProbability DefaultProb = W.DefaultProb; + BranchProbability UnhandledProbs = DefaultProb; + for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) + UnhandledProbs += I->Prob; + + MachineBasicBlock *CurMBB = W.MBB; + for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { + bool FallthroughUnreachable = false; + MachineBasicBlock *Fallthrough; + if (I == W.LastCluster) { + // For the last cluster, fall through to the default destination. + Fallthrough = DefaultMBB; + FallthroughUnreachable = isa<UnreachableInst>( + DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); + } else { + Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); + CurMF->insert(BBI, Fallthrough); + } + UnhandledProbs -= I->Prob; + + switch (I->Kind) { + case CC_BitTests: { if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, DefaultProb, UnhandledProbs, I, Fallthrough, FallthroughUnreachable)) { @@ -1215,356 +1215,356 @@ bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, return false; } break; - } - - case CC_JumpTable: { - if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, - UnhandledProbs, I, Fallthrough, - FallthroughUnreachable)) { - LLVM_DEBUG(dbgs() << "Failed to lower jump table"); - return false; - } - break; - } - case CC_Range: { - if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, - FallthroughUnreachable, UnhandledProbs, - CurMBB, MIB, SwitchMBB)) { - LLVM_DEBUG(dbgs() << "Failed to lower switch range"); - return false; - } - break; - } - } - CurMBB = Fallthrough; - } - - return true; -} - -bool IRTranslator::translateIndirectBr(const User &U, - MachineIRBuilder &MIRBuilder) { - const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); - - const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); - MIRBuilder.buildBrIndirect(Tgt); - - // Link successors. - SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; - MachineBasicBlock &CurBB = MIRBuilder.getMBB(); - for (const BasicBlock *Succ : successors(&BrInst)) { - // It's legal for indirectbr instructions to have duplicate blocks in the - // destination list. We don't allow this in MIR. Skip anything that's - // already a successor. 
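translateIndirectBr (continued below) deduplicates the successor list because MIR, unlike IR, allows each successor to appear only once. The same idea as a standalone helper (illustrative only; the function name is invented):

#include <vector>
#include "llvm/ADT/SmallPtrSet.h"

// Keep the first occurrence of each block; SmallPtrSet::insert reports
// whether the element was newly inserted.
template <typename BlockT>
std::vector<const BlockT *> uniqueSuccessors(const std::vector<const BlockT *> &Succs) {
  llvm::SmallPtrSet<const BlockT *, 32> Seen;
  std::vector<const BlockT *> Out;
  for (const BlockT *B : Succs)
    if (Seen.insert(B).second)
      Out.push_back(B);
  return Out;
}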
- if (!AddedSuccessors.insert(Succ).second) - continue; - CurBB.addSuccessor(&getMBB(*Succ)); - } - - return true; -} - -static bool isSwiftError(const Value *V) { - if (auto Arg = dyn_cast<Argument>(V)) - return Arg->hasSwiftErrorAttr(); - if (auto AI = dyn_cast<AllocaInst>(V)) - return AI->isSwiftError(); - return false; -} - -bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { - const LoadInst &LI = cast<LoadInst>(U); - if (DL->getTypeStoreSize(LI.getType()) == 0) - return true; - - ArrayRef<Register> Regs = getOrCreateVRegs(LI); - ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); - Register Base = getOrCreateVReg(*LI.getPointerOperand()); - - Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); - LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); - - if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { - assert(Regs.size() == 1 && "swifterror should be single pointer"); - Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), - LI.getPointerOperand()); - MIRBuilder.buildCopy(Regs[0], VReg); - return true; - } - - auto &TLI = *MF->getSubtarget().getTargetLowering(); - MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); - - const MDNode *Ranges = - Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr; - for (unsigned i = 0; i < Regs.size(); ++i) { - Register Addr; - MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); - - MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); - Align BaseAlign = getMemOpAlign(LI); - AAMDNodes AAMetadata; - LI.getAAMetadata(AAMetadata); - auto MMO = MF->getMachineMemOperand( - Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), - commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, - LI.getSyncScopeID(), LI.getOrdering()); - MIRBuilder.buildLoad(Regs[i], Addr, *MMO); - } - - return true; -} - -bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { - const StoreInst &SI = cast<StoreInst>(U); - if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) - return true; - - ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); - ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); - Register Base = getOrCreateVReg(*SI.getPointerOperand()); - - Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); - LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); - - if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { - assert(Vals.size() == 1 && "swifterror should be single pointer"); - - Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), - SI.getPointerOperand()); - MIRBuilder.buildCopy(VReg, Vals[0]); - return true; - } - - auto &TLI = *MF->getSubtarget().getTargetLowering(); - MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); - - for (unsigned i = 0; i < Vals.size(); ++i) { - Register Addr; - MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); - - MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); - Align BaseAlign = getMemOpAlign(SI); - AAMDNodes AAMetadata; - SI.getAAMetadata(AAMetadata); - auto MMO = MF->getMachineMemOperand( - Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), - commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, - SI.getSyncScopeID(), SI.getOrdering()); - MIRBuilder.buildStore(Vals[i], Addr, *MMO); - } - return true; -} - -static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { - const Value *Src = 
U.getOperand(0); - Type *Int32Ty = Type::getInt32Ty(U.getContext()); - - // getIndexedOffsetInType is designed for GEPs, so the first index is the - // usual array element rather than looking into the actual aggregate. - SmallVector<Value *, 1> Indices; - Indices.push_back(ConstantInt::get(Int32Ty, 0)); - - if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { - for (auto Idx : EVI->indices()) - Indices.push_back(ConstantInt::get(Int32Ty, Idx)); - } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { - for (auto Idx : IVI->indices()) - Indices.push_back(ConstantInt::get(Int32Ty, Idx)); - } else { - for (unsigned i = 1; i < U.getNumOperands(); ++i) - Indices.push_back(U.getOperand(i)); - } - - return 8 * static_cast<uint64_t>( - DL.getIndexedOffsetInType(Src->getType(), Indices)); -} - -bool IRTranslator::translateExtractValue(const User &U, - MachineIRBuilder &MIRBuilder) { - const Value *Src = U.getOperand(0); - uint64_t Offset = getOffsetFromIndices(U, *DL); - ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); - ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); - unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); - auto &DstRegs = allocateVRegs(U); - - for (unsigned i = 0; i < DstRegs.size(); ++i) - DstRegs[i] = SrcRegs[Idx++]; - - return true; -} - -bool IRTranslator::translateInsertValue(const User &U, - MachineIRBuilder &MIRBuilder) { - const Value *Src = U.getOperand(0); - uint64_t Offset = getOffsetFromIndices(U, *DL); - auto &DstRegs = allocateVRegs(U); - ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); - ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); - ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); - auto InsertedIt = InsertedRegs.begin(); - - for (unsigned i = 0; i < DstRegs.size(); ++i) { - if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) - DstRegs[i] = *InsertedIt++; - else - DstRegs[i] = SrcRegs[i]; - } - - return true; -} - -bool IRTranslator::translateSelect(const User &U, - MachineIRBuilder &MIRBuilder) { - Register Tst = getOrCreateVReg(*U.getOperand(0)); - ArrayRef<Register> ResRegs = getOrCreateVRegs(U); - ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); - ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); - - uint16_t Flags = 0; - if (const SelectInst *SI = dyn_cast<SelectInst>(&U)) - Flags = MachineInstr::copyFlagsFromInstruction(*SI); - - for (unsigned i = 0; i < ResRegs.size(); ++i) { - MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); - } - - return true; -} - -bool IRTranslator::translateCopy(const User &U, const Value &V, - MachineIRBuilder &MIRBuilder) { - Register Src = getOrCreateVReg(V); - auto &Regs = *VMap.getVRegs(U); - if (Regs.empty()) { - Regs.push_back(Src); - VMap.getOffsets(U)->push_back(0); - } else { - // If we already assigned a vreg for this instruction, we can't change that. - // Emit a copy to satisfy the users we already emitted. - MIRBuilder.buildCopy(Regs[0], Src); - } - return true; -} - -bool IRTranslator::translateBitCast(const User &U, - MachineIRBuilder &MIRBuilder) { - // If we're bitcasting to the source type, we can reuse the source vreg. 
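//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// The check just below turns a bitcast into a plain copy whenever operand
// and result lower to the same LLT, and only otherwise emits a real
// G_BITCAST. Sketch of that decision with an opaque type token standing
// in for LLT (an assumption for illustration only):
enum class LoweredTy { S32, S64, P0, V4S32 };

enum class CastLowering { ReuseAsCopy, EmitBitcast };

CastLowering classifyBitcast(LoweredTy Src, LoweredTy Dst) {
  return Src == Dst ? CastLowering::ReuseAsCopy : CastLowering::EmitBitcast;
}
//===----------------------------------------------------------------------===//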
- if (getLLTForType(*U.getOperand(0)->getType(), *DL) == - getLLTForType(*U.getType(), *DL)) - return translateCopy(U, *U.getOperand(0), MIRBuilder); - - return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); -} - -bool IRTranslator::translateCast(unsigned Opcode, const User &U, - MachineIRBuilder &MIRBuilder) { - Register Op = getOrCreateVReg(*U.getOperand(0)); - Register Res = getOrCreateVReg(U); - MIRBuilder.buildInstr(Opcode, {Res}, {Op}); - return true; -} - -bool IRTranslator::translateGetElementPtr(const User &U, - MachineIRBuilder &MIRBuilder) { - Value &Op0 = *U.getOperand(0); - Register BaseReg = getOrCreateVReg(Op0); - Type *PtrIRTy = Op0.getType(); - LLT PtrTy = getLLTForType(*PtrIRTy, *DL); - Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); - LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); - - // Normalize Vector GEP - all scalar operands should be converted to the - // splat vector. - unsigned VectorWidth = 0; - if (auto *VT = dyn_cast<VectorType>(U.getType())) - VectorWidth = cast<FixedVectorType>(VT)->getNumElements(); - - // We might need to splat the base pointer into a vector if the offsets - // are vectors. - if (VectorWidth && !PtrTy.isVector()) { - BaseReg = - MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg) - .getReg(0); - PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth); - PtrTy = getLLTForType(*PtrIRTy, *DL); - OffsetIRTy = DL->getIntPtrType(PtrIRTy); - OffsetTy = getLLTForType(*OffsetIRTy, *DL); - } - - int64_t Offset = 0; - for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); - GTI != E; ++GTI) { - const Value *Idx = GTI.getOperand(); - if (StructType *StTy = GTI.getStructTypeOrNull()) { - unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); - Offset += DL->getStructLayout(StTy)->getElementOffset(Field); - continue; - } else { - uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); - - // If this is a scalar constant or a splat vector of constants, - // handle it quickly. - if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { - Offset += ElementSize * CI->getSExtValue(); - continue; - } - - if (Offset != 0) { - auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); - BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) - .getReg(0); - Offset = 0; - } - - Register IdxReg = getOrCreateVReg(*Idx); - LLT IdxTy = MRI->getType(IdxReg); - if (IdxTy != OffsetTy) { - if (!IdxTy.isVector() && VectorWidth) { - IdxReg = MIRBuilder.buildSplatVector( - OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); - } - - IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); - } - - // N = N + Idx * ElementSize; - // Avoid doing it for ElementSize of 1. 
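//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// The GEP lowering above folds constant indices into a running byte
// offset and only materializes arithmetic for variable indices; as the
// comment says, the multiply is skipped when the element size is 1.
// A scalar-only sketch of that accumulation (no vector-splat handling);
// it counts the mul/add-style operations a scalar GEP would emit:
#include <cstdint>
#include <optional>
#include <vector>

struct GepIndex {
  std::optional<int64_t> Const; // set when the index is a constant
  int64_t ElementSize;          // alloc size of the indexed type
};

unsigned countGepOps(const std::vector<GepIndex> &Idxs) {
  int64_t Offset = 0;
  unsigned Ops = 0;
  for (const GepIndex &I : Idxs) {
    if (I.Const) { Offset += I.ElementSize * *I.Const; continue; }
    if (Offset != 0) { ++Ops; Offset = 0; } // flush the pending constant
    if (I.ElementSize != 1) ++Ops;          // mul by the element size
    ++Ops;                                  // the ptr_add itself
  }
  if (Offset != 0) ++Ops;                   // trailing constant ptr_add
  return Ops;
}
//===----------------------------------------------------------------------===//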
- Register GepOffsetReg; - if (ElementSize != 1) { - auto ElementSizeMIB = MIRBuilder.buildConstant( - getLLTForType(*OffsetIRTy, *DL), ElementSize); - GepOffsetReg = - MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); - } else - GepOffsetReg = IdxReg; - - BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); - } - } - - if (Offset != 0) { - auto OffsetMIB = - MIRBuilder.buildConstant(OffsetTy, Offset); - MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); - return true; - } - - MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); - return true; -} - -bool IRTranslator::translateMemFunc(const CallInst &CI, - MachineIRBuilder &MIRBuilder, + } + + case CC_JumpTable: { + if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, + UnhandledProbs, I, Fallthrough, + FallthroughUnreachable)) { + LLVM_DEBUG(dbgs() << "Failed to lower jump table"); + return false; + } + break; + } + case CC_Range: { + if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, + FallthroughUnreachable, UnhandledProbs, + CurMBB, MIB, SwitchMBB)) { + LLVM_DEBUG(dbgs() << "Failed to lower switch range"); + return false; + } + break; + } + } + CurMBB = Fallthrough; + } + + return true; +} + +bool IRTranslator::translateIndirectBr(const User &U, + MachineIRBuilder &MIRBuilder) { + const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); + + const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); + MIRBuilder.buildBrIndirect(Tgt); + + // Link successors. + SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; + MachineBasicBlock &CurBB = MIRBuilder.getMBB(); + for (const BasicBlock *Succ : successors(&BrInst)) { + // It's legal for indirectbr instructions to have duplicate blocks in the + // destination list. We don't allow this in MIR. Skip anything that's + // already a successor. + if (!AddedSuccessors.insert(Succ).second) + continue; + CurBB.addSuccessor(&getMBB(*Succ)); + } + + return true; +} + +static bool isSwiftError(const Value *V) { + if (auto Arg = dyn_cast<Argument>(V)) + return Arg->hasSwiftErrorAttr(); + if (auto AI = dyn_cast<AllocaInst>(V)) + return AI->isSwiftError(); + return false; +} + +bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { + const LoadInst &LI = cast<LoadInst>(U); + if (DL->getTypeStoreSize(LI.getType()) == 0) + return true; + + ArrayRef<Register> Regs = getOrCreateVRegs(LI); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); + Register Base = getOrCreateVReg(*LI.getPointerOperand()); + + Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { + assert(Regs.size() == 1 && "swifterror should be single pointer"); + Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), + LI.getPointerOperand()); + MIRBuilder.buildCopy(Regs[0], VReg); + return true; + } + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); + + const MDNode *Ranges = + Regs.size() == 1 ? 
LI.getMetadata(LLVMContext::MD_range) : nullptr; + for (unsigned i = 0; i < Regs.size(); ++i) { + Register Addr; + MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); + + MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); + Align BaseAlign = getMemOpAlign(LI); + AAMDNodes AAMetadata; + LI.getAAMetadata(AAMetadata); + auto MMO = MF->getMachineMemOperand( + Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, + LI.getSyncScopeID(), LI.getOrdering()); + MIRBuilder.buildLoad(Regs[i], Addr, *MMO); + } + + return true; +} + +bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { + const StoreInst &SI = cast<StoreInst>(U); + if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) + return true; + + ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); + Register Base = getOrCreateVReg(*SI.getPointerOperand()); + + Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { + assert(Vals.size() == 1 && "swifterror should be single pointer"); + + Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), + SI.getPointerOperand()); + MIRBuilder.buildCopy(VReg, Vals[0]); + return true; + } + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); + + for (unsigned i = 0; i < Vals.size(); ++i) { + Register Addr; + MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); + + MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); + Align BaseAlign = getMemOpAlign(SI); + AAMDNodes AAMetadata; + SI.getAAMetadata(AAMetadata); + auto MMO = MF->getMachineMemOperand( + Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), + commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, + SI.getSyncScopeID(), SI.getOrdering()); + MIRBuilder.buildStore(Vals[i], Addr, *MMO); + } + return true; +} + +static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { + const Value *Src = U.getOperand(0); + Type *Int32Ty = Type::getInt32Ty(U.getContext()); + + // getIndexedOffsetInType is designed for GEPs, so the first index is the + // usual array element rather than looking into the actual aggregate. 
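//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// Two details of getOffsetFromIndices are easy to miss: the synthetic
// leading zero index built just below (GEP-style offset queries consume
// one "array element" index before descending into the aggregate), and
// the final "8 *", which turns a byte offset into a bit offset. A
// self-contained sketch of the arithmetic for a tightly packed struct,
// standing in for DataLayout (an assumption for illustration):
#include <cstdint>
#include <numeric>
#include <vector>

uint64_t bitOffsetOfField(const std::vector<uint64_t> &FieldSizesInBytes,
                          unsigned Field) {
  uint64_t Bytes = std::accumulate(FieldSizesInBytes.begin(),
                                   FieldSizesInBytes.begin() + Field,
                                   uint64_t{0});
  return 8 * Bytes; // bits, matching the "8 *" in the function here
}
//===----------------------------------------------------------------------===//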
+ SmallVector<Value *, 1> Indices; + Indices.push_back(ConstantInt::get(Int32Ty, 0)); + + if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { + for (auto Idx : EVI->indices()) + Indices.push_back(ConstantInt::get(Int32Ty, Idx)); + } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { + for (auto Idx : IVI->indices()) + Indices.push_back(ConstantInt::get(Int32Ty, Idx)); + } else { + for (unsigned i = 1; i < U.getNumOperands(); ++i) + Indices.push_back(U.getOperand(i)); + } + + return 8 * static_cast<uint64_t>( + DL.getIndexedOffsetInType(Src->getType(), Indices)); +} + +bool IRTranslator::translateExtractValue(const User &U, + MachineIRBuilder &MIRBuilder) { + const Value *Src = U.getOperand(0); + uint64_t Offset = getOffsetFromIndices(U, *DL); + ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); + ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); + unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); + auto &DstRegs = allocateVRegs(U); + + for (unsigned i = 0; i < DstRegs.size(); ++i) + DstRegs[i] = SrcRegs[Idx++]; + + return true; +} + +bool IRTranslator::translateInsertValue(const User &U, + MachineIRBuilder &MIRBuilder) { + const Value *Src = U.getOperand(0); + uint64_t Offset = getOffsetFromIndices(U, *DL); + auto &DstRegs = allocateVRegs(U); + ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); + ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); + ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); + auto InsertedIt = InsertedRegs.begin(); + + for (unsigned i = 0; i < DstRegs.size(); ++i) { + if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) + DstRegs[i] = *InsertedIt++; + else + DstRegs[i] = SrcRegs[i]; + } + + return true; +} + +bool IRTranslator::translateSelect(const User &U, + MachineIRBuilder &MIRBuilder) { + Register Tst = getOrCreateVReg(*U.getOperand(0)); + ArrayRef<Register> ResRegs = getOrCreateVRegs(U); + ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); + ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); + + uint16_t Flags = 0; + if (const SelectInst *SI = dyn_cast<SelectInst>(&U)) + Flags = MachineInstr::copyFlagsFromInstruction(*SI); + + for (unsigned i = 0; i < ResRegs.size(); ++i) { + MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); + } + + return true; +} + +bool IRTranslator::translateCopy(const User &U, const Value &V, + MachineIRBuilder &MIRBuilder) { + Register Src = getOrCreateVReg(V); + auto &Regs = *VMap.getVRegs(U); + if (Regs.empty()) { + Regs.push_back(Src); + VMap.getOffsets(U)->push_back(0); + } else { + // If we already assigned a vreg for this instruction, we can't change that. + // Emit a copy to satisfy the users we already emitted. + MIRBuilder.buildCopy(Regs[0], Src); + } + return true; +} + +bool IRTranslator::translateBitCast(const User &U, + MachineIRBuilder &MIRBuilder) { + // If we're bitcasting to the source type, we can reuse the source vreg. 
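//===-- Editor's note (illustrative sketch, not part of the patch; refers
// to translateExtractValue earlier in this hunk) ------------------------===//
// extractvalue never emits an instruction: it finds the first
// sub-register whose offset matches the requested aggregate offset
// (lower_bound over the offset table) and forwards as many registers as
// the result needs. Self-contained sketch with plain ints for vregs:
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int> sliceRegs(const std::vector<int> &SrcRegs,
                           const std::vector<uint64_t> &Offsets,
                           uint64_t Offset, std::size_t NumDstRegs) {
  std::size_t Idx = static_cast<std::size_t>(
      std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
      Offsets.begin());
  return std::vector<int>(SrcRegs.begin() + Idx,
                          SrcRegs.begin() + Idx + NumDstRegs);
}
//===----------------------------------------------------------------------===//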
+ if (getLLTForType(*U.getOperand(0)->getType(), *DL) == + getLLTForType(*U.getType(), *DL)) + return translateCopy(U, *U.getOperand(0), MIRBuilder); + + return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); +} + +bool IRTranslator::translateCast(unsigned Opcode, const User &U, + MachineIRBuilder &MIRBuilder) { + Register Op = getOrCreateVReg(*U.getOperand(0)); + Register Res = getOrCreateVReg(U); + MIRBuilder.buildInstr(Opcode, {Res}, {Op}); + return true; +} + +bool IRTranslator::translateGetElementPtr(const User &U, + MachineIRBuilder &MIRBuilder) { + Value &Op0 = *U.getOperand(0); + Register BaseReg = getOrCreateVReg(Op0); + Type *PtrIRTy = Op0.getType(); + LLT PtrTy = getLLTForType(*PtrIRTy, *DL); + Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); + LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); + + // Normalize Vector GEP - all scalar operands should be converted to the + // splat vector. + unsigned VectorWidth = 0; + if (auto *VT = dyn_cast<VectorType>(U.getType())) + VectorWidth = cast<FixedVectorType>(VT)->getNumElements(); + + // We might need to splat the base pointer into a vector if the offsets + // are vectors. + if (VectorWidth && !PtrTy.isVector()) { + BaseReg = + MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg) + .getReg(0); + PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth); + PtrTy = getLLTForType(*PtrIRTy, *DL); + OffsetIRTy = DL->getIntPtrType(PtrIRTy); + OffsetTy = getLLTForType(*OffsetIRTy, *DL); + } + + int64_t Offset = 0; + for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); + GTI != E; ++GTI) { + const Value *Idx = GTI.getOperand(); + if (StructType *StTy = GTI.getStructTypeOrNull()) { + unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); + Offset += DL->getStructLayout(StTy)->getElementOffset(Field); + continue; + } else { + uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); + + // If this is a scalar constant or a splat vector of constants, + // handle it quickly. + if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { + Offset += ElementSize * CI->getSExtValue(); + continue; + } + + if (Offset != 0) { + auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); + BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) + .getReg(0); + Offset = 0; + } + + Register IdxReg = getOrCreateVReg(*Idx); + LLT IdxTy = MRI->getType(IdxReg); + if (IdxTy != OffsetTy) { + if (!IdxTy.isVector() && VectorWidth) { + IdxReg = MIRBuilder.buildSplatVector( + OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); + } + + IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); + } + + // N = N + Idx * ElementSize; + // Avoid doing it for ElementSize of 1. + Register GepOffsetReg; + if (ElementSize != 1) { + auto ElementSizeMIB = MIRBuilder.buildConstant( + getLLTForType(*OffsetIRTy, *DL), ElementSize); + GepOffsetReg = + MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); + } else + GepOffsetReg = IdxReg; + + BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); + } + } + + if (Offset != 0) { + auto OffsetMIB = + MIRBuilder.buildConstant(OffsetTy, Offset); + MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); + return true; + } + + MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); + return true; +} + +bool IRTranslator::translateMemFunc(const CallInst &CI, + MachineIRBuilder &MIRBuilder, unsigned Opcode) { - - // If the source is undef, then just emit a nop. 
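//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// The early-out below drops a mem intrinsic whose source operand is
// 'undef': copying unspecified bytes is unobservable, so translation
// succeeds without emitting anything. Sketch of the shape of that check
// with a simplified value model (only the undef property matters here):
struct SrcVal { bool IsUndef; };

bool lowerMemFunc(const SrcVal &Src) {
  if (Src.IsUndef)
    return true; // nothing to emit; the translation still "succeeds"
  // ... the real code emits G_MEMCPY / G_MEMMOVE / G_MEMSET here ...
  return true;
}
//===----------------------------------------------------------------------===//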
- if (isa<UndefValue>(CI.getArgOperand(1))) - return true; - + + // If the source is undef, then just emit a nop. + if (isa<UndefValue>(CI.getArgOperand(1))) + return true; + SmallVector<Register, 3> SrcRegs; - + unsigned MinPtrSize = UINT_MAX; for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) { Register SrcReg = getOrCreateVReg(**AI); @@ -1585,72 +1585,72 @@ bool IRTranslator::translateMemFunc(const CallInst &CI, for (Register SrcReg : SrcRegs) ICall.addUse(SrcReg); - Align DstAlign; - Align SrcAlign; - unsigned IsVol = - cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) - ->getZExtValue(); - - if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { - DstAlign = MCI->getDestAlign().valueOrOne(); - SrcAlign = MCI->getSourceAlign().valueOrOne(); - } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { - DstAlign = MMI->getDestAlign().valueOrOne(); - SrcAlign = MMI->getSourceAlign().valueOrOne(); - } else { - auto *MSI = cast<MemSetInst>(&CI); - DstAlign = MSI->getDestAlign().valueOrOne(); - } - - // We need to propagate the tail call flag from the IR inst as an argument. - // Otherwise, we have to pessimize and assume later that we cannot tail call - // any memory intrinsics. - ICall.addImm(CI.isTailCall() ? 1 : 0); - - // Create mem operands to store the alignment and volatile info. - auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; - ICall.addMemOperand(MF->getMachineMemOperand( - MachinePointerInfo(CI.getArgOperand(0)), - MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); + Align DstAlign; + Align SrcAlign; + unsigned IsVol = + cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) + ->getZExtValue(); + + if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { + DstAlign = MCI->getDestAlign().valueOrOne(); + SrcAlign = MCI->getSourceAlign().valueOrOne(); + } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { + DstAlign = MMI->getDestAlign().valueOrOne(); + SrcAlign = MMI->getSourceAlign().valueOrOne(); + } else { + auto *MSI = cast<MemSetInst>(&CI); + DstAlign = MSI->getDestAlign().valueOrOne(); + } + + // We need to propagate the tail call flag from the IR inst as an argument. + // Otherwise, we have to pessimize and assume later that we cannot tail call + // any memory intrinsics. + ICall.addImm(CI.isTailCall() ? 1 : 0); + + // Create mem operands to store the alignment and volatile info. + auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; + ICall.addMemOperand(MF->getMachineMemOperand( + MachinePointerInfo(CI.getArgOperand(0)), + MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); if (Opcode != TargetOpcode::G_MEMSET) - ICall.addMemOperand(MF->getMachineMemOperand( - MachinePointerInfo(CI.getArgOperand(1)), - MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); - - return true; -} - -void IRTranslator::getStackGuard(Register DstReg, - MachineIRBuilder &MIRBuilder) { - const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); - MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); - auto MIB = - MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); - - auto &TLI = *MF->getSubtarget().getTargetLowering(); - Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); - if (!Global) - return; - - MachinePointerInfo MPInfo(Global); - auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | - MachineMemOperand::MODereferenceable; - MachineMemOperand *MemRef = - MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, - DL->getPointerABIAlignment(0)); - MIB.setMemRefs({MemRef}); -} - -bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, - MachineIRBuilder &MIRBuilder) { - ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); - MIRBuilder.buildInstr( - Op, {ResRegs[0], ResRegs[1]}, - {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); - - return true; -} - + ICall.addMemOperand(MF->getMachineMemOperand( + MachinePointerInfo(CI.getArgOperand(1)), + MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); + + return true; +} + +void IRTranslator::getStackGuard(Register DstReg, + MachineIRBuilder &MIRBuilder) { + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); + auto MIB = + MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); + if (!Global) + return; + + MachinePointerInfo MPInfo(Global); + auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | + MachineMemOperand::MODereferenceable; + MachineMemOperand *MemRef = + MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, + DL->getPointerABIAlignment(0)); + MIB.setMemRefs({MemRef}); +} + +bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, + MachineIRBuilder &MIRBuilder) { + ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); + MIRBuilder.buildInstr( + Op, {ResRegs[0], ResRegs[1]}, + {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); + + return true; +} + bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, MachineIRBuilder &MIRBuilder) { Register Dst = getOrCreateVReg(CI); @@ -1661,74 +1661,74 @@ bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, return true; } -unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { - switch (ID) { - default: - break; - case Intrinsic::bswap: - return TargetOpcode::G_BSWAP; - case Intrinsic::bitreverse: - return TargetOpcode::G_BITREVERSE; - case Intrinsic::fshl: - return TargetOpcode::G_FSHL; - case Intrinsic::fshr: - return TargetOpcode::G_FSHR; - case Intrinsic::ceil: - return TargetOpcode::G_FCEIL; - case Intrinsic::cos: - return TargetOpcode::G_FCOS; - case Intrinsic::ctpop: - return 
TargetOpcode::G_CTPOP; - case Intrinsic::exp: - return TargetOpcode::G_FEXP; - case Intrinsic::exp2: - return TargetOpcode::G_FEXP2; - case Intrinsic::fabs: - return TargetOpcode::G_FABS; - case Intrinsic::copysign: - return TargetOpcode::G_FCOPYSIGN; - case Intrinsic::minnum: - return TargetOpcode::G_FMINNUM; - case Intrinsic::maxnum: - return TargetOpcode::G_FMAXNUM; - case Intrinsic::minimum: - return TargetOpcode::G_FMINIMUM; - case Intrinsic::maximum: - return TargetOpcode::G_FMAXIMUM; - case Intrinsic::canonicalize: - return TargetOpcode::G_FCANONICALIZE; - case Intrinsic::floor: - return TargetOpcode::G_FFLOOR; - case Intrinsic::fma: - return TargetOpcode::G_FMA; - case Intrinsic::log: - return TargetOpcode::G_FLOG; - case Intrinsic::log2: - return TargetOpcode::G_FLOG2; - case Intrinsic::log10: - return TargetOpcode::G_FLOG10; - case Intrinsic::nearbyint: - return TargetOpcode::G_FNEARBYINT; - case Intrinsic::pow: - return TargetOpcode::G_FPOW; +unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { + switch (ID) { + default: + break; + case Intrinsic::bswap: + return TargetOpcode::G_BSWAP; + case Intrinsic::bitreverse: + return TargetOpcode::G_BITREVERSE; + case Intrinsic::fshl: + return TargetOpcode::G_FSHL; + case Intrinsic::fshr: + return TargetOpcode::G_FSHR; + case Intrinsic::ceil: + return TargetOpcode::G_FCEIL; + case Intrinsic::cos: + return TargetOpcode::G_FCOS; + case Intrinsic::ctpop: + return TargetOpcode::G_CTPOP; + case Intrinsic::exp: + return TargetOpcode::G_FEXP; + case Intrinsic::exp2: + return TargetOpcode::G_FEXP2; + case Intrinsic::fabs: + return TargetOpcode::G_FABS; + case Intrinsic::copysign: + return TargetOpcode::G_FCOPYSIGN; + case Intrinsic::minnum: + return TargetOpcode::G_FMINNUM; + case Intrinsic::maxnum: + return TargetOpcode::G_FMAXNUM; + case Intrinsic::minimum: + return TargetOpcode::G_FMINIMUM; + case Intrinsic::maximum: + return TargetOpcode::G_FMAXIMUM; + case Intrinsic::canonicalize: + return TargetOpcode::G_FCANONICALIZE; + case Intrinsic::floor: + return TargetOpcode::G_FFLOOR; + case Intrinsic::fma: + return TargetOpcode::G_FMA; + case Intrinsic::log: + return TargetOpcode::G_FLOG; + case Intrinsic::log2: + return TargetOpcode::G_FLOG2; + case Intrinsic::log10: + return TargetOpcode::G_FLOG10; + case Intrinsic::nearbyint: + return TargetOpcode::G_FNEARBYINT; + case Intrinsic::pow: + return TargetOpcode::G_FPOW; case Intrinsic::powi: return TargetOpcode::G_FPOWI; - case Intrinsic::rint: - return TargetOpcode::G_FRINT; - case Intrinsic::round: - return TargetOpcode::G_INTRINSIC_ROUND; + case Intrinsic::rint: + return TargetOpcode::G_FRINT; + case Intrinsic::round: + return TargetOpcode::G_INTRINSIC_ROUND; case Intrinsic::roundeven: return TargetOpcode::G_INTRINSIC_ROUNDEVEN; - case Intrinsic::sin: - return TargetOpcode::G_FSIN; - case Intrinsic::sqrt: - return TargetOpcode::G_FSQRT; - case Intrinsic::trunc: - return TargetOpcode::G_INTRINSIC_TRUNC; - case Intrinsic::readcyclecounter: - return TargetOpcode::G_READCYCLECOUNTER; - case Intrinsic::ptrmask: - return TargetOpcode::G_PTRMASK; + case Intrinsic::sin: + return TargetOpcode::G_FSIN; + case Intrinsic::sqrt: + return TargetOpcode::G_FSQRT; + case Intrinsic::trunc: + return TargetOpcode::G_INTRINSIC_TRUNC; + case Intrinsic::readcyclecounter: + return TargetOpcode::G_READCYCLECOUNTER; + case Intrinsic::ptrmask: + return TargetOpcode::G_PTRMASK; case Intrinsic::lrint: return TargetOpcode::G_INTRINSIC_LRINT; // FADD/FMUL require checking the FMF, so are handled elsewhere. 
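//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// getSimpleIntrinsicOpcode is deliberately a pure 1:1 table: anything
// needing extra operands, flag checks, or custom lowering (the FADD/FMUL
// case mentioned above) stays out of it and is handled in
// translateKnownIntrinsic instead. Sketch of the same table shape with
// hypothetical numeric ids in place of Intrinsic::ID / TargetOpcode:
#include <unordered_map>

unsigned simpleOpcodeFor(unsigned IntrinsicId) {
  static const std::unordered_map<unsigned, unsigned> Table = {
      {1 /* e.g. bswap */, 101 /* e.g. G_BSWAP */},
      {2 /* e.g. sqrt  */, 102 /* e.g. G_FSQRT */},
  };
  auto It = Table.find(IntrinsicId);
  return It == Table.end() ? 0 /* not_intrinsic */ : It->second;
}
//===----------------------------------------------------------------------===//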
@@ -1754,212 +1754,212 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { return TargetOpcode::G_VECREDUCE_UMAX; case Intrinsic::vector_reduce_umin: return TargetOpcode::G_VECREDUCE_UMIN; - } - return Intrinsic::not_intrinsic; -} - -bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, - Intrinsic::ID ID, - MachineIRBuilder &MIRBuilder) { - - unsigned Op = getSimpleIntrinsicOpcode(ID); - - // Is this a simple intrinsic? - if (Op == Intrinsic::not_intrinsic) - return false; - - // Yes. Let's translate it. - SmallVector<llvm::SrcOp, 4> VRegs; - for (auto &Arg : CI.arg_operands()) - VRegs.push_back(getOrCreateVReg(*Arg)); - - MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs, - MachineInstr::copyFlagsFromInstruction(CI)); - return true; -} - -// TODO: Include ConstainedOps.def when all strict instructions are defined. -static unsigned getConstrainedOpcode(Intrinsic::ID ID) { - switch (ID) { - case Intrinsic::experimental_constrained_fadd: - return TargetOpcode::G_STRICT_FADD; - case Intrinsic::experimental_constrained_fsub: - return TargetOpcode::G_STRICT_FSUB; - case Intrinsic::experimental_constrained_fmul: - return TargetOpcode::G_STRICT_FMUL; - case Intrinsic::experimental_constrained_fdiv: - return TargetOpcode::G_STRICT_FDIV; - case Intrinsic::experimental_constrained_frem: - return TargetOpcode::G_STRICT_FREM; - case Intrinsic::experimental_constrained_fma: - return TargetOpcode::G_STRICT_FMA; - case Intrinsic::experimental_constrained_sqrt: - return TargetOpcode::G_STRICT_FSQRT; - default: - return 0; - } -} - -bool IRTranslator::translateConstrainedFPIntrinsic( - const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) { - fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue(); - - unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID()); - if (!Opcode) - return false; - - unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI); - if (EB == fp::ExceptionBehavior::ebIgnore) - Flags |= MachineInstr::NoFPExcept; - - SmallVector<llvm::SrcOp, 4> VRegs; - VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0))); - if (!FPI.isUnaryOp()) - VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1))); - if (FPI.isTernaryOp()) - VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2))); - - MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags); - return true; -} - -bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, - MachineIRBuilder &MIRBuilder) { - - // If this is a simple intrinsic (that is, we just need to add a def of - // a vreg, and uses for each arg operand, then translate it. - if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) - return true; - - switch (ID) { - default: - break; - case Intrinsic::lifetime_start: - case Intrinsic::lifetime_end: { - // No stack colouring in O0, discard region information. - if (MF->getTarget().getOptLevel() == CodeGenOpt::None) - return true; - - unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START - : TargetOpcode::LIFETIME_END; - - // Get the underlying objects for the location passed on the lifetime - // marker. - SmallVector<const Value *, 4> Allocas; + } + return Intrinsic::not_intrinsic; +} + +bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, + Intrinsic::ID ID, + MachineIRBuilder &MIRBuilder) { + + unsigned Op = getSimpleIntrinsicOpcode(ID); + + // Is this a simple intrinsic? + if (Op == Intrinsic::not_intrinsic) + return false; + + // Yes. Let's translate it. 
+ SmallVector<llvm::SrcOp, 4> VRegs; + for (auto &Arg : CI.arg_operands()) + VRegs.push_back(getOrCreateVReg(*Arg)); + + MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs, + MachineInstr::copyFlagsFromInstruction(CI)); + return true; +} + +// TODO: Include ConstainedOps.def when all strict instructions are defined. +static unsigned getConstrainedOpcode(Intrinsic::ID ID) { + switch (ID) { + case Intrinsic::experimental_constrained_fadd: + return TargetOpcode::G_STRICT_FADD; + case Intrinsic::experimental_constrained_fsub: + return TargetOpcode::G_STRICT_FSUB; + case Intrinsic::experimental_constrained_fmul: + return TargetOpcode::G_STRICT_FMUL; + case Intrinsic::experimental_constrained_fdiv: + return TargetOpcode::G_STRICT_FDIV; + case Intrinsic::experimental_constrained_frem: + return TargetOpcode::G_STRICT_FREM; + case Intrinsic::experimental_constrained_fma: + return TargetOpcode::G_STRICT_FMA; + case Intrinsic::experimental_constrained_sqrt: + return TargetOpcode::G_STRICT_FSQRT; + default: + return 0; + } +} + +bool IRTranslator::translateConstrainedFPIntrinsic( + const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) { + fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue(); + + unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID()); + if (!Opcode) + return false; + + unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI); + if (EB == fp::ExceptionBehavior::ebIgnore) + Flags |= MachineInstr::NoFPExcept; + + SmallVector<llvm::SrcOp, 4> VRegs; + VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0))); + if (!FPI.isUnaryOp()) + VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1))); + if (FPI.isTernaryOp()) + VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2))); + + MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags); + return true; +} + +bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, + MachineIRBuilder &MIRBuilder) { + + // If this is a simple intrinsic (that is, we just need to add a def of + // a vreg, and uses for each arg operand, then translate it. + if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) + return true; + + switch (ID) { + default: + break; + case Intrinsic::lifetime_start: + case Intrinsic::lifetime_end: { + // No stack colouring in O0, discard region information. + if (MF->getTarget().getOptLevel() == CodeGenOpt::None) + return true; + + unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START + : TargetOpcode::LIFETIME_END; + + // Get the underlying objects for the location passed on the lifetime + // marker. + SmallVector<const Value *, 4> Allocas; getUnderlyingObjects(CI.getArgOperand(1), Allocas); - - // Iterate over each underlying object, creating lifetime markers for each - // static alloca. Quit if we find a non-static alloca. 
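//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// Sketch of the filtering loop that follows: non-alloca objects are
// ignored, a non-static alloca aborts marker emission entirely (while
// still reporting success), and only static allocas receive
// LIFETIME_START / LIFETIME_END. Simplified stand-ins for the IR objects:
#include <vector>

struct Underlying { bool IsAlloca; bool IsStaticAlloca; };

bool emitLifetimeMarkers(const std::vector<Underlying> &Objs) {
  for (const Underlying &O : Objs) {
    if (!O.IsAlloca)
      continue;          // not an alloca: nothing to mark
    if (!O.IsStaticAlloca)
      return true;       // dynamic alloca: give up on markers, succeed
    // ... emit the lifetime marker for O's frame index here ...
  }
  return true;
}
//===----------------------------------------------------------------------===//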
- for (const Value *V : Allocas) { - const AllocaInst *AI = dyn_cast<AllocaInst>(V); - if (!AI) - continue; - - if (!AI->isStaticAlloca()) - return true; - - MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); - } - return true; - } - case Intrinsic::dbg_declare: { - const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); - assert(DI.getVariable() && "Missing variable"); - - const Value *Address = DI.getAddress(); - if (!Address || isa<UndefValue>(Address)) { - LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); - return true; - } - - assert(DI.getVariable()->isValidLocationForIntrinsic( - MIRBuilder.getDebugLoc()) && - "Expected inlined-at fields to agree"); - auto AI = dyn_cast<AllocaInst>(Address); - if (AI && AI->isStaticAlloca()) { - // Static allocas are tracked at the MF level, no need for DBG_VALUE - // instructions (in fact, they get ignored if they *do* exist). - MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), - getOrCreateFrameIndex(*AI), DI.getDebugLoc()); - } else { - // A dbg.declare describes the address of a source variable, so lower it - // into an indirect DBG_VALUE. - MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), - DI.getVariable(), DI.getExpression()); - } - return true; - } - case Intrinsic::dbg_label: { - const DbgLabelInst &DI = cast<DbgLabelInst>(CI); - assert(DI.getLabel() && "Missing label"); - - assert(DI.getLabel()->isValidLocationForIntrinsic( - MIRBuilder.getDebugLoc()) && - "Expected inlined-at fields to agree"); - - MIRBuilder.buildDbgLabel(DI.getLabel()); - return true; - } - case Intrinsic::vaend: - // No target I know of cares about va_end. Certainly no in-tree target - // does. Simplest intrinsic ever! - return true; - case Intrinsic::vastart: { - auto &TLI = *MF->getSubtarget().getTargetLowering(); - Value *Ptr = CI.getArgOperand(0); - unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; - - // FIXME: Get alignment - MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) - .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), - MachineMemOperand::MOStore, - ListSize, Align(1))); - return true; - } - case Intrinsic::dbg_value: { - // This form of DBG_VALUE is target-independent. - const DbgValueInst &DI = cast<DbgValueInst>(CI); - const Value *V = DI.getValue(); - assert(DI.getVariable()->isValidLocationForIntrinsic( - MIRBuilder.getDebugLoc()) && - "Expected inlined-at fields to agree"); - if (!V) { - // Currently the optimizer can produce this; insert an undef to - // help debugging. Probably the optimizer should not do this. - MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); - } else if (const auto *CI = dyn_cast<Constant>(V)) { - MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); - } else { - for (Register Reg : getOrCreateVRegs(*V)) { - // FIXME: This does not handle register-indirect values at offset 0. The - // direct/indirect thing shouldn't really be handled by something as - // implicit as reg+noreg vs reg+imm in the first place, but it seems - // pretty baked in right now. 
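//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// dbg.value lowering is a three-way split: a missing value becomes an
// undef DBG_VALUE (an optimizer artifact kept to aid debugging), a
// constant becomes a constant DBG_VALUE, and everything else gets one
// direct DBG_VALUE per sub-register -- the case the FIXME above about
// register-indirect values applies to. Sketch of the classification:
enum class DbgLowering { UndefValue, ConstantValue, PerRegisterDirect };

struct DbgOperand { bool HasValue; bool IsConstant; };

DbgLowering classifyDbgValue(const DbgOperand &V) {
  if (!V.HasValue)  return DbgLowering::UndefValue;
  if (V.IsConstant) return DbgLowering::ConstantValue;
  return DbgLowering::PerRegisterDirect;
}
//===----------------------------------------------------------------------===//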
- MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); - } - } - return true; - } - case Intrinsic::uadd_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); - case Intrinsic::sadd_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); - case Intrinsic::usub_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); - case Intrinsic::ssub_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); - case Intrinsic::umul_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); - case Intrinsic::smul_with_overflow: - return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); - case Intrinsic::uadd_sat: - return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); - case Intrinsic::sadd_sat: - return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); - case Intrinsic::usub_sat: - return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); - case Intrinsic::ssub_sat: - return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); + + // Iterate over each underlying object, creating lifetime markers for each + // static alloca. Quit if we find a non-static alloca. + for (const Value *V : Allocas) { + const AllocaInst *AI = dyn_cast<AllocaInst>(V); + if (!AI) + continue; + + if (!AI->isStaticAlloca()) + return true; + + MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); + } + return true; + } + case Intrinsic::dbg_declare: { + const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); + assert(DI.getVariable() && "Missing variable"); + + const Value *Address = DI.getAddress(); + if (!Address || isa<UndefValue>(Address)) { + LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); + return true; + } + + assert(DI.getVariable()->isValidLocationForIntrinsic( + MIRBuilder.getDebugLoc()) && + "Expected inlined-at fields to agree"); + auto AI = dyn_cast<AllocaInst>(Address); + if (AI && AI->isStaticAlloca()) { + // Static allocas are tracked at the MF level, no need for DBG_VALUE + // instructions (in fact, they get ignored if they *do* exist). + MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), + getOrCreateFrameIndex(*AI), DI.getDebugLoc()); + } else { + // A dbg.declare describes the address of a source variable, so lower it + // into an indirect DBG_VALUE. + MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), + DI.getVariable(), DI.getExpression()); + } + return true; + } + case Intrinsic::dbg_label: { + const DbgLabelInst &DI = cast<DbgLabelInst>(CI); + assert(DI.getLabel() && "Missing label"); + + assert(DI.getLabel()->isValidLocationForIntrinsic( + MIRBuilder.getDebugLoc()) && + "Expected inlined-at fields to agree"); + + MIRBuilder.buildDbgLabel(DI.getLabel()); + return true; + } + case Intrinsic::vaend: + // No target I know of cares about va_end. Certainly no in-tree target + // does. Simplest intrinsic ever! + return true; + case Intrinsic::vastart: { + auto &TLI = *MF->getSubtarget().getTargetLowering(); + Value *Ptr = CI.getArgOperand(0); + unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; + + // FIXME: Get alignment + MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) + .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), + MachineMemOperand::MOStore, + ListSize, Align(1))); + return true; + } + case Intrinsic::dbg_value: { + // This form of DBG_VALUE is target-independent. 
+ const DbgValueInst &DI = cast<DbgValueInst>(CI); + const Value *V = DI.getValue(); + assert(DI.getVariable()->isValidLocationForIntrinsic( + MIRBuilder.getDebugLoc()) && + "Expected inlined-at fields to agree"); + if (!V) { + // Currently the optimizer can produce this; insert an undef to + // help debugging. Probably the optimizer should not do this. + MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); + } else if (const auto *CI = dyn_cast<Constant>(V)) { + MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); + } else { + for (Register Reg : getOrCreateVRegs(*V)) { + // FIXME: This does not handle register-indirect values at offset 0. The + // direct/indirect thing shouldn't really be handled by something as + // implicit as reg+noreg vs reg+imm in the first place, but it seems + // pretty baked in right now. + MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); + } + } + return true; + } + case Intrinsic::uadd_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); + case Intrinsic::sadd_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); + case Intrinsic::usub_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); + case Intrinsic::ssub_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); + case Intrinsic::umul_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); + case Intrinsic::smul_with_overflow: + return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); + case Intrinsic::uadd_sat: + return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); + case Intrinsic::sadd_sat: + return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); + case Intrinsic::usub_sat: + return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); + case Intrinsic::ssub_sat: + return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); case Intrinsic::ushl_sat: return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder); case Intrinsic::sshl_sat: @@ -1991,29 +1991,29 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); case Intrinsic::udiv_fix_sat: return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); - case Intrinsic::fmuladd: { - const TargetMachine &TM = MF->getTarget(); - const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); - Register Dst = getOrCreateVReg(CI); - Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); - Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); - Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); - if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && - TLI.isFMAFasterThanFMulAndFAdd(*MF, - TLI.getValueType(*DL, CI.getType()))) { - // TODO: Revisit this to see if we should move this part of the - // lowering to the combiner. 
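//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// fmuladd either fuses into a single FMA or decomposes into fmul+fadd.
// Sketch of just that decision; the two booleans are stand-ins for the
// TM.Options.AllowFPOpFusion != FPOpFusion::Strict test and for
// TLI.isFMAFasterThanFMulAndFAdd(...) in the condition just above:
enum class FmuladdLowering { SingleFMA, MulThenAdd };

FmuladdLowering chooseFmuladd(bool FusionAllowed, bool FmaIsFaster) {
  return (FusionAllowed && FmaIsFaster) ? FmuladdLowering::SingleFMA
                                        : FmuladdLowering::MulThenAdd;
}
//===----------------------------------------------------------------------===//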
- MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, - MachineInstr::copyFlagsFromInstruction(CI)); - } else { - LLT Ty = getLLTForType(*CI.getType(), *DL); - auto FMul = MIRBuilder.buildFMul( - Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); - MIRBuilder.buildFAdd(Dst, FMul, Op2, - MachineInstr::copyFlagsFromInstruction(CI)); - } - return true; - } + case Intrinsic::fmuladd: { + const TargetMachine &TM = MF->getTarget(); + const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); + Register Dst = getOrCreateVReg(CI); + Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); + Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); + Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); + if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && + TLI.isFMAFasterThanFMulAndFAdd(*MF, + TLI.getValueType(*DL, CI.getType()))) { + // TODO: Revisit this to see if we should move this part of the + // lowering to the combiner. + MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, + MachineInstr::copyFlagsFromInstruction(CI)); + } else { + LLT Ty = getLLTForType(*CI.getType(), *DL); + auto FMul = MIRBuilder.buildFMul( + Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); + MIRBuilder.buildFAdd(Dst, FMul, Op2, + MachineInstr::copyFlagsFromInstruction(CI)); + } + return true; + } case Intrinsic::convert_from_fp16: // FIXME: This intrinsic should probably be removed from the IR. MIRBuilder.buildFPExt(getOrCreateVReg(CI), @@ -2026,94 +2026,94 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, getOrCreateVReg(*CI.getArgOperand(0)), MachineInstr::copyFlagsFromInstruction(CI)); return true; - case Intrinsic::memcpy: + case Intrinsic::memcpy: return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY); - case Intrinsic::memmove: + case Intrinsic::memmove: return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE); - case Intrinsic::memset: + case Intrinsic::memset: return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET); - case Intrinsic::eh_typeid_for: { - GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); - Register Reg = getOrCreateVReg(CI); - unsigned TypeID = MF->getTypeIDFor(GV); - MIRBuilder.buildConstant(Reg, TypeID); - return true; - } - case Intrinsic::objectsize: - llvm_unreachable("llvm.objectsize.* should have been lowered already"); - - case Intrinsic::is_constant: - llvm_unreachable("llvm.is.constant.* should have been lowered already"); - - case Intrinsic::stackguard: - getStackGuard(getOrCreateVReg(CI), MIRBuilder); - return true; - case Intrinsic::stackprotector: { - LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); - Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); - getStackGuard(GuardVal, MIRBuilder); - - AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); - int FI = getOrCreateFrameIndex(*Slot); - MF->getFrameInfo().setStackProtectorIndex(FI); - - MIRBuilder.buildStore( - GuardVal, getOrCreateVReg(*Slot), - *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), - MachineMemOperand::MOStore | - MachineMemOperand::MOVolatile, - PtrTy.getSizeInBits() / 8, Align(8))); - return true; - } - case Intrinsic::stacksave: { - // Save the stack pointer to the location provided by the intrinsic. - Register Reg = getOrCreateVReg(CI); - Register StackPtr = MF->getSubtarget() - .getTargetLowering() - ->getStackPointerRegisterToSaveRestore(); - - // If the target doesn't specify a stack pointer, then fall back. 
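//===-- Editor's note (illustrative sketch, not part of the patch) -------===//
// stacksave and stackrestore are mirror images: one copies out of the
// target's stack-pointer register, the other copies back into it, and
// both bail out (reporting failure so the usual fallback path takes
// over) when the target declares no such register. Sketch with register
// number 0 standing in for "no stack pointer" (an assumption):
#include <optional>

struct CopyOp { unsigned Dst, Src; };

std::optional<CopyOp> lowerStackSave(unsigned ResultReg, unsigned SP) {
  if (!SP) return std::nullopt; // no SP register: translation fails
  return CopyOp{ResultReg, SP}; // result <- SP
}

std::optional<CopyOp> lowerStackRestore(unsigned ValueReg, unsigned SP) {
  if (!SP) return std::nullopt;
  return CopyOp{SP, ValueReg};  // SP <- saved value
}
//===----------------------------------------------------------------------===//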
- if (!StackPtr) - return false; - - MIRBuilder.buildCopy(Reg, StackPtr); - return true; - } - case Intrinsic::stackrestore: { - // Restore the stack pointer from the location provided by the intrinsic. - Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); - Register StackPtr = MF->getSubtarget() - .getTargetLowering() - ->getStackPointerRegisterToSaveRestore(); - - // If the target doesn't specify a stack pointer, then fall back. - if (!StackPtr) - return false; - - MIRBuilder.buildCopy(StackPtr, Reg); - return true; - } - case Intrinsic::cttz: - case Intrinsic::ctlz: { - ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); - bool isTrailing = ID == Intrinsic::cttz; - unsigned Opcode = isTrailing - ? Cst->isZero() ? TargetOpcode::G_CTTZ - : TargetOpcode::G_CTTZ_ZERO_UNDEF - : Cst->isZero() ? TargetOpcode::G_CTLZ - : TargetOpcode::G_CTLZ_ZERO_UNDEF; - MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, - {getOrCreateVReg(*CI.getArgOperand(0))}); - return true; - } - case Intrinsic::invariant_start: { - LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); - Register Undef = MRI->createGenericVirtualRegister(PtrTy); - MIRBuilder.buildUndef(Undef); - return true; - } - case Intrinsic::invariant_end: - return true; + case Intrinsic::eh_typeid_for: { + GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); + Register Reg = getOrCreateVReg(CI); + unsigned TypeID = MF->getTypeIDFor(GV); + MIRBuilder.buildConstant(Reg, TypeID); + return true; + } + case Intrinsic::objectsize: + llvm_unreachable("llvm.objectsize.* should have been lowered already"); + + case Intrinsic::is_constant: + llvm_unreachable("llvm.is.constant.* should have been lowered already"); + + case Intrinsic::stackguard: + getStackGuard(getOrCreateVReg(CI), MIRBuilder); + return true; + case Intrinsic::stackprotector: { + LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); + Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); + getStackGuard(GuardVal, MIRBuilder); + + AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); + int FI = getOrCreateFrameIndex(*Slot); + MF->getFrameInfo().setStackProtectorIndex(FI); + + MIRBuilder.buildStore( + GuardVal, getOrCreateVReg(*Slot), + *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), + MachineMemOperand::MOStore | + MachineMemOperand::MOVolatile, + PtrTy.getSizeInBits() / 8, Align(8))); + return true; + } + case Intrinsic::stacksave: { + // Save the stack pointer to the location provided by the intrinsic. + Register Reg = getOrCreateVReg(CI); + Register StackPtr = MF->getSubtarget() + .getTargetLowering() + ->getStackPointerRegisterToSaveRestore(); + + // If the target doesn't specify a stack pointer, then fall back. + if (!StackPtr) + return false; + + MIRBuilder.buildCopy(Reg, StackPtr); + return true; + } + case Intrinsic::stackrestore: { + // Restore the stack pointer from the location provided by the intrinsic. + Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); + Register StackPtr = MF->getSubtarget() + .getTargetLowering() + ->getStackPointerRegisterToSaveRestore(); + + // If the target doesn't specify a stack pointer, then fall back. + if (!StackPtr) + return false; + + MIRBuilder.buildCopy(StackPtr, Reg); + return true; + } + case Intrinsic::cttz: + case Intrinsic::ctlz: { + ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); + bool isTrailing = ID == Intrinsic::cttz; + unsigned Opcode = isTrailing + ? Cst->isZero() ? TargetOpcode::G_CTTZ + : TargetOpcode::G_CTTZ_ZERO_UNDEF + : Cst->isZero() ? 
TargetOpcode::G_CTLZ + : TargetOpcode::G_CTLZ_ZERO_UNDEF; + MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, + {getOrCreateVReg(*CI.getArgOperand(0))}); + return true; + } + case Intrinsic::invariant_start: { + LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); + Register Undef = MRI->createGenericVirtualRegister(PtrTy); + MIRBuilder.buildUndef(Undef); + return true; + } + case Intrinsic::invariant_end: + return true; case Intrinsic::expect: case Intrinsic::annotation: case Intrinsic::ptr_annotation: @@ -2124,27 +2124,27 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, getOrCreateVReg(*CI.getArgOperand(0))); return true; } - case Intrinsic::assume: + case Intrinsic::assume: case Intrinsic::experimental_noalias_scope_decl: - case Intrinsic::var_annotation: - case Intrinsic::sideeffect: - // Discard annotate attributes, assumptions, and artificial side-effects. - return true; - case Intrinsic::read_volatile_register: - case Intrinsic::read_register: { - Value *Arg = CI.getArgOperand(0); - MIRBuilder - .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) - .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); - return true; - } - case Intrinsic::write_register: { - Value *Arg = CI.getArgOperand(0); - MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) - .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) - .addUse(getOrCreateVReg(*CI.getArgOperand(1))); - return true; - } + case Intrinsic::var_annotation: + case Intrinsic::sideeffect: + // Discard annotate attributes, assumptions, and artificial side-effects. + return true; + case Intrinsic::read_volatile_register: + case Intrinsic::read_register: { + Value *Arg = CI.getArgOperand(0); + MIRBuilder + .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) + .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); + return true; + } + case Intrinsic::write_register: { + Value *Arg = CI.getArgOperand(0); + MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) + .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) + .addUse(getOrCreateVReg(*CI.getArgOperand(1))); + return true; + } case Intrinsic::localescape: { MachineBasicBlock &EntryMBB = MF->front(); StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName()); @@ -2207,156 +2207,156 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, return true; } -#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ - case Intrinsic::INTRINSIC: -#include "llvm/IR/ConstrainedOps.def" - return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI), - MIRBuilder); - - } - return false; -} - -bool IRTranslator::translateInlineAsm(const CallBase &CB, - MachineIRBuilder &MIRBuilder) { - - const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); - - if (!ALI) { - LLVM_DEBUG( - dbgs() << "Inline asm lowering is not supported for this target yet\n"); - return false; - } - - return ALI->lowerInlineAsm( - MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); -} - -bool IRTranslator::translateCallBase(const CallBase &CB, - MachineIRBuilder &MIRBuilder) { - ArrayRef<Register> Res = getOrCreateVRegs(CB); - - SmallVector<ArrayRef<Register>, 8> Args; - Register SwiftInVReg = 0; - Register SwiftErrorVReg = 0; - for (auto &Arg : CB.args()) { - if (CLI->supportSwiftError() && isSwiftError(Arg)) { - assert(SwiftInVReg == 0 && "Expected only one swift error argument"); - LLT Ty = 
getLLTForType(*Arg->getType(), *DL); - SwiftInVReg = MRI->createGenericVirtualRegister(Ty); - MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( - &CB, &MIRBuilder.getMBB(), Arg)); - Args.emplace_back(makeArrayRef(SwiftInVReg)); - SwiftErrorVReg = - SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); - continue; - } - Args.push_back(getOrCreateVRegs(*Arg)); - } - - // We don't set HasCalls on MFI here yet because call lowering may decide to - // optimize into tail calls. Instead, we defer that to selection where a final - // scan is done to check if any instructions are calls. - bool Success = - CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, - [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); - - // Check if we just inserted a tail call. - if (Success) { - assert(!HasTailCall && "Can't tail call return twice from block?"); - const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); - HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); - } - - return Success; -} - -bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { - const CallInst &CI = cast<CallInst>(U); - auto TII = MF->getTarget().getIntrinsicInfo(); - const Function *F = CI.getCalledFunction(); - - // FIXME: support Windows dllimport function calls. - if (F && (F->hasDLLImportStorageClass() || - (MF->getTarget().getTargetTriple().isOSWindows() && - F->hasExternalWeakLinkage()))) - return false; - - // FIXME: support control flow guard targets. - if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) - return false; - - if (CI.isInlineAsm()) - return translateInlineAsm(CI, MIRBuilder); - - Intrinsic::ID ID = Intrinsic::not_intrinsic; - if (F && F->isIntrinsic()) { - ID = F->getIntrinsicID(); - if (TII && ID == Intrinsic::not_intrinsic) - ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); - } - - if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) - return translateCallBase(CI, MIRBuilder); - - assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); - - if (translateKnownIntrinsic(CI, ID, MIRBuilder)) - return true; - - ArrayRef<Register> ResultRegs; - if (!CI.getType()->isVoidTy()) - ResultRegs = getOrCreateVRegs(CI); - - // Ignore the callsite attributes. Backend code is most likely not expecting - // an intrinsic to sometimes have side effects and sometimes not. - MachineInstrBuilder MIB = - MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); - if (isa<FPMathOperator>(CI)) - MIB->copyIRFlags(CI); - - for (auto &Arg : enumerate(CI.arg_operands())) { - // If this is required to be an immediate, don't materialize it in a - // register. - if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { - // imm arguments are more convenient than cimm (and realistically - // probably sufficient), so use them. 
- assert(CI->getBitWidth() <= 64 && - "large intrinsic immediates not handled"); - MIB.addImm(CI->getSExtValue()); - } else { - MIB.addFPImm(cast<ConstantFP>(Arg.value())); - } +#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ + case Intrinsic::INTRINSIC: +#include "llvm/IR/ConstrainedOps.def" + return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI), + MIRBuilder); + + } + return false; +} + +bool IRTranslator::translateInlineAsm(const CallBase &CB, + MachineIRBuilder &MIRBuilder) { + + const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); + + if (!ALI) { + LLVM_DEBUG( + dbgs() << "Inline asm lowering is not supported for this target yet\n"); + return false; + } + + return ALI->lowerInlineAsm( + MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); +} + +bool IRTranslator::translateCallBase(const CallBase &CB, + MachineIRBuilder &MIRBuilder) { + ArrayRef<Register> Res = getOrCreateVRegs(CB); + + SmallVector<ArrayRef<Register>, 8> Args; + Register SwiftInVReg = 0; + Register SwiftErrorVReg = 0; + for (auto &Arg : CB.args()) { + if (CLI->supportSwiftError() && isSwiftError(Arg)) { + assert(SwiftInVReg == 0 && "Expected only one swift error argument"); + LLT Ty = getLLTForType(*Arg->getType(), *DL); + SwiftInVReg = MRI->createGenericVirtualRegister(Ty); + MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( + &CB, &MIRBuilder.getMBB(), Arg)); + Args.emplace_back(makeArrayRef(SwiftInVReg)); + SwiftErrorVReg = + SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); + continue; + } + Args.push_back(getOrCreateVRegs(*Arg)); + } + + // We don't set HasCalls on MFI here yet because call lowering may decide to + // optimize into tail calls. Instead, we defer that to selection where a final + // scan is done to check if any instructions are calls. + bool Success = + CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, + [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); + + // Check if we just inserted a tail call. + if (Success) { + assert(!HasTailCall && "Can't tail call return twice from block?"); + const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); + HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); + } + + return Success; +} + +bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { + const CallInst &CI = cast<CallInst>(U); + auto TII = MF->getTarget().getIntrinsicInfo(); + const Function *F = CI.getCalledFunction(); + + // FIXME: support Windows dllimport function calls. + if (F && (F->hasDLLImportStorageClass() || + (MF->getTarget().getTargetTriple().isOSWindows() && + F->hasExternalWeakLinkage()))) + return false; + + // FIXME: support control flow guard targets. + if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) + return false; + + if (CI.isInlineAsm()) + return translateInlineAsm(CI, MIRBuilder); + + Intrinsic::ID ID = Intrinsic::not_intrinsic; + if (F && F->isIntrinsic()) { + ID = F->getIntrinsicID(); + if (TII && ID == Intrinsic::not_intrinsic) + ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); + } + + if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) + return translateCallBase(CI, MIRBuilder); + + assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); + + if (translateKnownIntrinsic(CI, ID, MIRBuilder)) + return true; + + ArrayRef<Register> ResultRegs; + if (!CI.getType()->isVoidTy()) + ResultRegs = getOrCreateVRegs(CI); + + // Ignore the callsite attributes. 
Backend code is most likely not expecting + // an intrinsic to sometimes have side effects and sometimes not. + MachineInstrBuilder MIB = + MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); + if (isa<FPMathOperator>(CI)) + MIB->copyIRFlags(CI); + + for (auto &Arg : enumerate(CI.arg_operands())) { + // If this is required to be an immediate, don't materialize it in a + // register. + if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { + if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { + // imm arguments are more convenient than cimm (and realistically + // probably sufficient), so use them. + assert(CI->getBitWidth() <= 64 && + "large intrinsic immediates not handled"); + MIB.addImm(CI->getSExtValue()); + } else { + MIB.addFPImm(cast<ConstantFP>(Arg.value())); + } } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) { auto *MDN = dyn_cast<MDNode>(MD->getMetadata()); if (!MDN) // This was probably an MDString. return false; MIB.addMetadata(MDN); - } else { - ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); - if (VRegs.size() > 1) - return false; - MIB.addUse(VRegs[0]); - } - } - - // Add a MachineMemOperand if it is a target mem intrinsic. - const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); - TargetLowering::IntrinsicInfo Info; - // TODO: Add a GlobalISel version of getTgtMemIntrinsic. - if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { - Align Alignment = Info.align.getValueOr( - DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); - - uint64_t Size = Info.memVT.getStoreSize(); - MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), - Info.flags, Size, Alignment)); - } - - return true; -} - + } else { + ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); + if (VRegs.size() > 1) + return false; + MIB.addUse(VRegs[0]); + } + } + + // Add a MachineMemOperand if it is a target mem intrinsic. + const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); + TargetLowering::IntrinsicInfo Info; + // TODO: Add a GlobalISel version of getTgtMemIntrinsic. + if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { + Align Alignment = Info.align.getValueOr( + DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); + + uint64_t Size = Info.memVT.getStoreSize(); + MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), + Info.flags, Size, Alignment)); + } + + return true; +} + bool IRTranslator::findUnwindDestinations( const BasicBlock *EHPadBB, BranchProbability Prob, @@ -2413,45 +2413,45 @@ bool IRTranslator::findUnwindDestinations( return true; } -bool IRTranslator::translateInvoke(const User &U, - MachineIRBuilder &MIRBuilder) { - const InvokeInst &I = cast<InvokeInst>(U); - MCContext &Context = MF->getContext(); - - const BasicBlock *ReturnBB = I.getSuccessor(0); - const BasicBlock *EHPadBB = I.getSuccessor(1); - - const Function *Fn = I.getCalledFunction(); - if (I.isInlineAsm()) - return false; - - // FIXME: support invoking patchpoint and statepoint intrinsics. - if (Fn && Fn->isIntrinsic()) - return false; - - // FIXME: support whatever these are. - if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) - return false; - - // FIXME: support control flow guard targets. - if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) - return false; - - // FIXME: support Windows exception handling. 
+bool IRTranslator::translateInvoke(const User &U, + MachineIRBuilder &MIRBuilder) { + const InvokeInst &I = cast<InvokeInst>(U); + MCContext &Context = MF->getContext(); + + const BasicBlock *ReturnBB = I.getSuccessor(0); + const BasicBlock *EHPadBB = I.getSuccessor(1); + + const Function *Fn = I.getCalledFunction(); + if (I.isInlineAsm()) + return false; + + // FIXME: support invoking patchpoint and statepoint intrinsics. + if (Fn && Fn->isIntrinsic()) + return false; + + // FIXME: support whatever these are. + if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) + return false; + + // FIXME: support control flow guard targets. + if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) + return false; + + // FIXME: support Windows exception handling. if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI())) - return false; - - // Emit the actual call, bracketed by EH_LABELs so that the MF knows about - // the region covered by the try. - MCSymbol *BeginSymbol = Context.createTempSymbol(); - MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); - - if (!translateCallBase(I, MIRBuilder)) - return false; - - MCSymbol *EndSymbol = Context.createTempSymbol(); - MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); - + return false; + + // Emit the actual call, bracketed by EH_LABELs so that the MF knows about + // the region covered by the try. + MCSymbol *BeginSymbol = Context.createTempSymbol(); + MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); + + if (!translateCallBase(I, MIRBuilder)) + return false; + + MCSymbol *EndSymbol = Context.createTempSymbol(); + MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); + SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; BranchProbabilityInfo *BPI = FuncInfo.BPI; MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(); @@ -2462,8 +2462,8 @@ bool IRTranslator::translateInvoke(const User &U, if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests)) return false; - MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), - &ReturnMBB = getMBB(*ReturnBB); + MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), + &ReturnMBB = getMBB(*ReturnBB); // Update successor info. addSuccessorWithProb(InvokeMBB, &ReturnMBB); for (auto &UnwindDest : UnwindDests) { @@ -2472,468 +2472,468 @@ bool IRTranslator::translateInvoke(const User &U, } InvokeMBB->normalizeSuccProbs(); - MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); - MIRBuilder.buildBr(ReturnMBB); - return true; -} - -bool IRTranslator::translateCallBr(const User &U, - MachineIRBuilder &MIRBuilder) { - // FIXME: Implement this. - return false; -} - -bool IRTranslator::translateLandingPad(const User &U, - MachineIRBuilder &MIRBuilder) { - const LandingPadInst &LP = cast<LandingPadInst>(U); - - MachineBasicBlock &MBB = MIRBuilder.getMBB(); - - MBB.setIsEHPad(); - - // If there aren't registers to copy the values into (e.g., during SjLj - // exceptions), then don't bother. - auto &TLI = *MF->getSubtarget().getTargetLowering(); - const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); - if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && - TLI.getExceptionSelectorRegister(PersonalityFn) == 0) - return true; - - // If landingpad's return type is token type, we don't create DAG nodes - // for its exception pointer and selector value. The extraction of exception - // pointer or selector value from token type landingpads is not currently - // supported. 
- if (LP.getType()->isTokenTy()) - return true; - - // Add a label to mark the beginning of the landing pad. Deletion of the - // landing pad can thus be detected via the MachineModuleInfo. - MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) - .addSym(MF->addLandingPad(&MBB)); - + MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); + MIRBuilder.buildBr(ReturnMBB); + return true; +} + +bool IRTranslator::translateCallBr(const User &U, + MachineIRBuilder &MIRBuilder) { + // FIXME: Implement this. + return false; +} + +bool IRTranslator::translateLandingPad(const User &U, + MachineIRBuilder &MIRBuilder) { + const LandingPadInst &LP = cast<LandingPadInst>(U); + + MachineBasicBlock &MBB = MIRBuilder.getMBB(); + + MBB.setIsEHPad(); + + // If there aren't registers to copy the values into (e.g., during SjLj + // exceptions), then don't bother. + auto &TLI = *MF->getSubtarget().getTargetLowering(); + const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); + if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && + TLI.getExceptionSelectorRegister(PersonalityFn) == 0) + return true; + + // If landingpad's return type is token type, we don't create DAG nodes + // for its exception pointer and selector value. The extraction of exception + // pointer or selector value from token type landingpads is not currently + // supported. + if (LP.getType()->isTokenTy()) + return true; + + // Add a label to mark the beginning of the landing pad. Deletion of the + // landing pad can thus be detected via the MachineModuleInfo. + MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) + .addSym(MF->addLandingPad(&MBB)); + // If the unwinder does not preserve all registers, ensure that the // function marks the clobbered registers as used. const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo(); if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF)) MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask); - LLT Ty = getLLTForType(*LP.getType(), *DL); - Register Undef = MRI->createGenericVirtualRegister(Ty); - MIRBuilder.buildUndef(Undef); - - SmallVector<LLT, 2> Tys; - for (Type *Ty : cast<StructType>(LP.getType())->elements()) - Tys.push_back(getLLTForType(*Ty, *DL)); - assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); - - // Mark exception register as live in. - Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); - if (!ExceptionReg) - return false; - - MBB.addLiveIn(ExceptionReg); - ArrayRef<Register> ResRegs = getOrCreateVRegs(LP); - MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); - - Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); - if (!SelectorReg) - return false; - - MBB.addLiveIn(SelectorReg); - Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); - MIRBuilder.buildCopy(PtrVReg, SelectorReg); - MIRBuilder.buildCast(ResRegs[1], PtrVReg); - - return true; -} - -bool IRTranslator::translateAlloca(const User &U, - MachineIRBuilder &MIRBuilder) { - auto &AI = cast<AllocaInst>(U); - - if (AI.isSwiftError()) - return true; - - if (AI.isStaticAlloca()) { - Register Res = getOrCreateVReg(AI); - int FI = getOrCreateFrameIndex(AI); - MIRBuilder.buildFrameIndex(Res, FI); - return true; - } - - // FIXME: support stack probing for Windows. - if (MF->getTarget().getTargetTriple().isOSWindows()) - return false; - - // Now we're in the harder dynamic case. 
- Register NumElts = getOrCreateVReg(*AI.getArraySize()); - Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); - LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); - if (MRI->getType(NumElts) != IntPtrTy) { - Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); - MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); - NumElts = ExtElts; - } - - Type *Ty = AI.getAllocatedType(); - - Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); - Register TySize = - getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty))); - MIRBuilder.buildMul(AllocSize, NumElts, TySize); - - // Round the size of the allocation up to the stack alignment size - // by add SA-1 to the size. This doesn't overflow because we're computing - // an address inside an alloca. - Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); - auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1); - auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, - MachineInstr::NoUWrap); - auto AlignCst = - MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1)); - auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); - - Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty)); - if (Alignment <= StackAlign) - Alignment = Align(1); - MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment); - - MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI); - assert(MF->getFrameInfo().hasVarSizedObjects()); - return true; -} - -bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { - // FIXME: We may need more info about the type. Because of how LLT works, - // we're completely discarding the i64/double distinction here (amongst - // others). Fortunately the ABIs I know of where that matters don't use va_arg - // anyway but that's not guaranteed. - MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, - {getOrCreateVReg(*U.getOperand(0)), - DL->getABITypeAlign(U.getType()).value()}); - return true; -} - -bool IRTranslator::translateInsertElement(const User &U, - MachineIRBuilder &MIRBuilder) { - // If it is a <1 x Ty> vector, use the scalar as it is - // not a legal vector type in LLT. - if (cast<FixedVectorType>(U.getType())->getNumElements() == 1) - return translateCopy(U, *U.getOperand(1), MIRBuilder); - - Register Res = getOrCreateVReg(U); - Register Val = getOrCreateVReg(*U.getOperand(0)); - Register Elt = getOrCreateVReg(*U.getOperand(1)); - Register Idx = getOrCreateVReg(*U.getOperand(2)); - MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); - return true; -} - -bool IRTranslator::translateExtractElement(const User &U, - MachineIRBuilder &MIRBuilder) { - // If it is a <1 x Ty> vector, use the scalar as it is - // not a legal vector type in LLT. 
- if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1) - return translateCopy(U, *U.getOperand(0), MIRBuilder); - - Register Res = getOrCreateVReg(U); - Register Val = getOrCreateVReg(*U.getOperand(0)); - const auto &TLI = *MF->getSubtarget().getTargetLowering(); - unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); - Register Idx; - if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { - if (CI->getBitWidth() != PreferredVecIdxWidth) { - APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); - auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); - Idx = getOrCreateVReg(*NewIdxCI); - } - } - if (!Idx) - Idx = getOrCreateVReg(*U.getOperand(1)); - if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { - const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); - Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); - } - MIRBuilder.buildExtractVectorElement(Res, Val, Idx); - return true; -} - -bool IRTranslator::translateShuffleVector(const User &U, - MachineIRBuilder &MIRBuilder) { - ArrayRef<int> Mask; - if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) - Mask = SVI->getShuffleMask(); - else - Mask = cast<ConstantExpr>(U).getShuffleMask(); - ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); - MIRBuilder - .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, - {getOrCreateVReg(*U.getOperand(0)), - getOrCreateVReg(*U.getOperand(1))}) - .addShuffleMask(MaskAlloc); - return true; -} - -bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { - const PHINode &PI = cast<PHINode>(U); - - SmallVector<MachineInstr *, 4> Insts; - for (auto Reg : getOrCreateVRegs(PI)) { - auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); - Insts.push_back(MIB.getInstr()); - } - - PendingPHIs.emplace_back(&PI, std::move(Insts)); - return true; -} - -bool IRTranslator::translateAtomicCmpXchg(const User &U, - MachineIRBuilder &MIRBuilder) { - const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); - - auto &TLI = *MF->getSubtarget().getTargetLowering(); - auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); - - Type *ResType = I.getType(); - Type *ValType = ResType->Type::getStructElementType(0); - - auto Res = getOrCreateVRegs(I); - Register OldValRes = Res[0]; - Register SuccessRes = Res[1]; - Register Addr = getOrCreateVReg(*I.getPointerOperand()); - Register Cmp = getOrCreateVReg(*I.getCompareOperand()); - Register NewVal = getOrCreateVReg(*I.getNewValOperand()); - - AAMDNodes AAMetadata; - I.getAAMetadata(AAMetadata); - - MIRBuilder.buildAtomicCmpXchgWithSuccess( - OldValRes, SuccessRes, Addr, Cmp, NewVal, - *MF->getMachineMemOperand( - MachinePointerInfo(I.getPointerOperand()), Flags, - DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, - I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); - return true; -} - -bool IRTranslator::translateAtomicRMW(const User &U, - MachineIRBuilder &MIRBuilder) { - const AtomicRMWInst &I = cast<AtomicRMWInst>(U); - auto &TLI = *MF->getSubtarget().getTargetLowering(); - auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); - - Type *ResType = I.getType(); - - Register Res = getOrCreateVReg(I); - Register Addr = getOrCreateVReg(*I.getPointerOperand()); - Register Val = getOrCreateVReg(*I.getValOperand()); - - unsigned Opcode = 0; - switch (I.getOperation()) { - default: - return false; - case AtomicRMWInst::Xchg: - Opcode = TargetOpcode::G_ATOMICRMW_XCHG; - break; - case AtomicRMWInst::Add: - Opcode = 
TargetOpcode::G_ATOMICRMW_ADD; - break; - case AtomicRMWInst::Sub: - Opcode = TargetOpcode::G_ATOMICRMW_SUB; - break; - case AtomicRMWInst::And: - Opcode = TargetOpcode::G_ATOMICRMW_AND; - break; - case AtomicRMWInst::Nand: - Opcode = TargetOpcode::G_ATOMICRMW_NAND; - break; - case AtomicRMWInst::Or: - Opcode = TargetOpcode::G_ATOMICRMW_OR; - break; - case AtomicRMWInst::Xor: - Opcode = TargetOpcode::G_ATOMICRMW_XOR; - break; - case AtomicRMWInst::Max: - Opcode = TargetOpcode::G_ATOMICRMW_MAX; - break; - case AtomicRMWInst::Min: - Opcode = TargetOpcode::G_ATOMICRMW_MIN; - break; - case AtomicRMWInst::UMax: - Opcode = TargetOpcode::G_ATOMICRMW_UMAX; - break; - case AtomicRMWInst::UMin: - Opcode = TargetOpcode::G_ATOMICRMW_UMIN; - break; - case AtomicRMWInst::FAdd: - Opcode = TargetOpcode::G_ATOMICRMW_FADD; - break; - case AtomicRMWInst::FSub: - Opcode = TargetOpcode::G_ATOMICRMW_FSUB; - break; - } - - AAMDNodes AAMetadata; - I.getAAMetadata(AAMetadata); - - MIRBuilder.buildAtomicRMW( - Opcode, Res, Addr, Val, - *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), - Flags, DL->getTypeStoreSize(ResType), - getMemOpAlign(I), AAMetadata, nullptr, - I.getSyncScopeID(), I.getOrdering())); - return true; -} - -bool IRTranslator::translateFence(const User &U, - MachineIRBuilder &MIRBuilder) { - const FenceInst &Fence = cast<FenceInst>(U); - MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), - Fence.getSyncScopeID()); - return true; -} - -bool IRTranslator::translateFreeze(const User &U, - MachineIRBuilder &MIRBuilder) { - const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); - const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); - - assert(DstRegs.size() == SrcRegs.size() && - "Freeze with different source and destination type?"); - - for (unsigned I = 0; I < DstRegs.size(); ++I) { - MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); - } - - return true; -} - -void IRTranslator::finishPendingPhis() { -#ifndef NDEBUG - DILocationVerifier Verifier; - GISelObserverWrapper WrapperObserver(&Verifier); - RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); -#endif // ifndef NDEBUG - for (auto &Phi : PendingPHIs) { - const PHINode *PI = Phi.first; - ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; - MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); - EntryBuilder->setDebugLoc(PI->getDebugLoc()); -#ifndef NDEBUG - Verifier.setCurrentInst(PI); -#endif // ifndef NDEBUG - - SmallSet<const MachineBasicBlock *, 16> SeenPreds; - for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { - auto IRPred = PI->getIncomingBlock(i); - ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); - for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { - if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) - continue; - SeenPreds.insert(Pred); - for (unsigned j = 0; j < ValRegs.size(); ++j) { - MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); - MIB.addUse(ValRegs[j]); - MIB.addMBB(Pred); - } - } - } - } -} - -bool IRTranslator::valueIsSplit(const Value &V, - SmallVectorImpl<uint64_t> *Offsets) { - SmallVector<LLT, 4> SplitTys; - if (Offsets && !Offsets->empty()) - Offsets->clear(); - computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); - return SplitTys.size() > 1; -} - -bool IRTranslator::translate(const Instruction &Inst) { - CurBuilder->setDebugLoc(Inst.getDebugLoc()); - // We only emit constants into the entry block from here. To prevent jumpy - // debug behaviour set the line to 0. 
- if (const DebugLoc &DL = Inst.getDebugLoc()) + LLT Ty = getLLTForType(*LP.getType(), *DL); + Register Undef = MRI->createGenericVirtualRegister(Ty); + MIRBuilder.buildUndef(Undef); + + SmallVector<LLT, 2> Tys; + for (Type *Ty : cast<StructType>(LP.getType())->elements()) + Tys.push_back(getLLTForType(*Ty, *DL)); + assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); + + // Mark exception register as live in. + Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); + if (!ExceptionReg) + return false; + + MBB.addLiveIn(ExceptionReg); + ArrayRef<Register> ResRegs = getOrCreateVRegs(LP); + MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); + + Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); + if (!SelectorReg) + return false; + + MBB.addLiveIn(SelectorReg); + Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); + MIRBuilder.buildCopy(PtrVReg, SelectorReg); + MIRBuilder.buildCast(ResRegs[1], PtrVReg); + + return true; +} + +bool IRTranslator::translateAlloca(const User &U, + MachineIRBuilder &MIRBuilder) { + auto &AI = cast<AllocaInst>(U); + + if (AI.isSwiftError()) + return true; + + if (AI.isStaticAlloca()) { + Register Res = getOrCreateVReg(AI); + int FI = getOrCreateFrameIndex(AI); + MIRBuilder.buildFrameIndex(Res, FI); + return true; + } + + // FIXME: support stack probing for Windows. + if (MF->getTarget().getTargetTriple().isOSWindows()) + return false; + + // Now we're in the harder dynamic case. + Register NumElts = getOrCreateVReg(*AI.getArraySize()); + Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); + LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); + if (MRI->getType(NumElts) != IntPtrTy) { + Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); + MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); + NumElts = ExtElts; + } + + Type *Ty = AI.getAllocatedType(); + + Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); + Register TySize = + getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty))); + MIRBuilder.buildMul(AllocSize, NumElts, TySize); + + // Round the size of the allocation up to the stack alignment size + // by add SA-1 to the size. This doesn't overflow because we're computing + // an address inside an alloca. + Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); + auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1); + auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, + MachineInstr::NoUWrap); + auto AlignCst = + MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1)); + auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); + + Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty)); + if (Alignment <= StackAlign) + Alignment = Align(1); + MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment); + + MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI); + assert(MF->getFrameInfo().hasVarSizedObjects()); + return true; +} + +bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { + // FIXME: We may need more info about the type. Because of how LLT works, + // we're completely discarding the i64/double distinction here (amongst + // others). Fortunately the ABIs I know of where that matters don't use va_arg + // anyway but that's not guaranteed. 
+ MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, + {getOrCreateVReg(*U.getOperand(0)), + DL->getABITypeAlign(U.getType()).value()}); + return true; +} + +bool IRTranslator::translateInsertElement(const User &U, + MachineIRBuilder &MIRBuilder) { + // If it is a <1 x Ty> vector, use the scalar as it is + // not a legal vector type in LLT. + if (cast<FixedVectorType>(U.getType())->getNumElements() == 1) + return translateCopy(U, *U.getOperand(1), MIRBuilder); + + Register Res = getOrCreateVReg(U); + Register Val = getOrCreateVReg(*U.getOperand(0)); + Register Elt = getOrCreateVReg(*U.getOperand(1)); + Register Idx = getOrCreateVReg(*U.getOperand(2)); + MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); + return true; +} + +bool IRTranslator::translateExtractElement(const User &U, + MachineIRBuilder &MIRBuilder) { + // If it is a <1 x Ty> vector, use the scalar as it is + // not a legal vector type in LLT. + if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1) + return translateCopy(U, *U.getOperand(0), MIRBuilder); + + Register Res = getOrCreateVReg(U); + Register Val = getOrCreateVReg(*U.getOperand(0)); + const auto &TLI = *MF->getSubtarget().getTargetLowering(); + unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); + Register Idx; + if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { + if (CI->getBitWidth() != PreferredVecIdxWidth) { + APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); + auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); + Idx = getOrCreateVReg(*NewIdxCI); + } + } + if (!Idx) + Idx = getOrCreateVReg(*U.getOperand(1)); + if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { + const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); + Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); + } + MIRBuilder.buildExtractVectorElement(Res, Val, Idx); + return true; +} + +bool IRTranslator::translateShuffleVector(const User &U, + MachineIRBuilder &MIRBuilder) { + ArrayRef<int> Mask; + if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) + Mask = SVI->getShuffleMask(); + else + Mask = cast<ConstantExpr>(U).getShuffleMask(); + ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); + MIRBuilder + .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, + {getOrCreateVReg(*U.getOperand(0)), + getOrCreateVReg(*U.getOperand(1))}) + .addShuffleMask(MaskAlloc); + return true; +} + +bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { + const PHINode &PI = cast<PHINode>(U); + + SmallVector<MachineInstr *, 4> Insts; + for (auto Reg : getOrCreateVRegs(PI)) { + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); + Insts.push_back(MIB.getInstr()); + } + + PendingPHIs.emplace_back(&PI, std::move(Insts)); + return true; +} + +bool IRTranslator::translateAtomicCmpXchg(const User &U, + MachineIRBuilder &MIRBuilder) { + const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); + + Type *ResType = I.getType(); + Type *ValType = ResType->Type::getStructElementType(0); + + auto Res = getOrCreateVRegs(I); + Register OldValRes = Res[0]; + Register SuccessRes = Res[1]; + Register Addr = getOrCreateVReg(*I.getPointerOperand()); + Register Cmp = getOrCreateVReg(*I.getCompareOperand()); + Register NewVal = getOrCreateVReg(*I.getNewValOperand()); + + AAMDNodes AAMetadata; + I.getAAMetadata(AAMetadata); + + 
MIRBuilder.buildAtomicCmpXchgWithSuccess( + OldValRes, SuccessRes, Addr, Cmp, NewVal, + *MF->getMachineMemOperand( + MachinePointerInfo(I.getPointerOperand()), Flags, + DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, + I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); + return true; +} + +bool IRTranslator::translateAtomicRMW(const User &U, + MachineIRBuilder &MIRBuilder) { + const AtomicRMWInst &I = cast<AtomicRMWInst>(U); + auto &TLI = *MF->getSubtarget().getTargetLowering(); + auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); + + Type *ResType = I.getType(); + + Register Res = getOrCreateVReg(I); + Register Addr = getOrCreateVReg(*I.getPointerOperand()); + Register Val = getOrCreateVReg(*I.getValOperand()); + + unsigned Opcode = 0; + switch (I.getOperation()) { + default: + return false; + case AtomicRMWInst::Xchg: + Opcode = TargetOpcode::G_ATOMICRMW_XCHG; + break; + case AtomicRMWInst::Add: + Opcode = TargetOpcode::G_ATOMICRMW_ADD; + break; + case AtomicRMWInst::Sub: + Opcode = TargetOpcode::G_ATOMICRMW_SUB; + break; + case AtomicRMWInst::And: + Opcode = TargetOpcode::G_ATOMICRMW_AND; + break; + case AtomicRMWInst::Nand: + Opcode = TargetOpcode::G_ATOMICRMW_NAND; + break; + case AtomicRMWInst::Or: + Opcode = TargetOpcode::G_ATOMICRMW_OR; + break; + case AtomicRMWInst::Xor: + Opcode = TargetOpcode::G_ATOMICRMW_XOR; + break; + case AtomicRMWInst::Max: + Opcode = TargetOpcode::G_ATOMICRMW_MAX; + break; + case AtomicRMWInst::Min: + Opcode = TargetOpcode::G_ATOMICRMW_MIN; + break; + case AtomicRMWInst::UMax: + Opcode = TargetOpcode::G_ATOMICRMW_UMAX; + break; + case AtomicRMWInst::UMin: + Opcode = TargetOpcode::G_ATOMICRMW_UMIN; + break; + case AtomicRMWInst::FAdd: + Opcode = TargetOpcode::G_ATOMICRMW_FADD; + break; + case AtomicRMWInst::FSub: + Opcode = TargetOpcode::G_ATOMICRMW_FSUB; + break; + } + + AAMDNodes AAMetadata; + I.getAAMetadata(AAMetadata); + + MIRBuilder.buildAtomicRMW( + Opcode, Res, Addr, Val, + *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), + Flags, DL->getTypeStoreSize(ResType), + getMemOpAlign(I), AAMetadata, nullptr, + I.getSyncScopeID(), I.getOrdering())); + return true; +} + +bool IRTranslator::translateFence(const User &U, + MachineIRBuilder &MIRBuilder) { + const FenceInst &Fence = cast<FenceInst>(U); + MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), + Fence.getSyncScopeID()); + return true; +} + +bool IRTranslator::translateFreeze(const User &U, + MachineIRBuilder &MIRBuilder) { + const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); + const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); + + assert(DstRegs.size() == SrcRegs.size() && + "Freeze with different source and destination type?"); + + for (unsigned I = 0; I < DstRegs.size(); ++I) { + MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); + } + + return true; +} + +void IRTranslator::finishPendingPhis() { +#ifndef NDEBUG + DILocationVerifier Verifier; + GISelObserverWrapper WrapperObserver(&Verifier); + RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); +#endif // ifndef NDEBUG + for (auto &Phi : PendingPHIs) { + const PHINode *PI = Phi.first; + ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; + MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); + EntryBuilder->setDebugLoc(PI->getDebugLoc()); +#ifndef NDEBUG + Verifier.setCurrentInst(PI); +#endif // ifndef NDEBUG + + SmallSet<const MachineBasicBlock *, 16> SeenPreds; + for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { + auto IRPred = 
PI->getIncomingBlock(i); + ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); + for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { + if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) + continue; + SeenPreds.insert(Pred); + for (unsigned j = 0; j < ValRegs.size(); ++j) { + MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); + MIB.addUse(ValRegs[j]); + MIB.addMBB(Pred); + } + } + } + } +} + +bool IRTranslator::valueIsSplit(const Value &V, + SmallVectorImpl<uint64_t> *Offsets) { + SmallVector<LLT, 4> SplitTys; + if (Offsets && !Offsets->empty()) + Offsets->clear(); + computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); + return SplitTys.size() > 1; +} + +bool IRTranslator::translate(const Instruction &Inst) { + CurBuilder->setDebugLoc(Inst.getDebugLoc()); + // We only emit constants into the entry block from here. To prevent jumpy + // debug behaviour set the line to 0. + if (const DebugLoc &DL = Inst.getDebugLoc()) EntryBuilder->setDebugLoc(DILocation::get( Inst.getContext(), 0, 0, DL.getScope(), DL.getInlinedAt())); - else - EntryBuilder->setDebugLoc(DebugLoc()); - - auto &TLI = *MF->getSubtarget().getTargetLowering(); - if (TLI.fallBackToDAGISel(Inst)) - return false; - - switch (Inst.getOpcode()) { -#define HANDLE_INST(NUM, OPCODE, CLASS) \ - case Instruction::OPCODE: \ - return translate##OPCODE(Inst, *CurBuilder.get()); -#include "llvm/IR/Instruction.def" - default: - return false; - } -} - -bool IRTranslator::translate(const Constant &C, Register Reg) { - if (auto CI = dyn_cast<ConstantInt>(&C)) - EntryBuilder->buildConstant(Reg, *CI); - else if (auto CF = dyn_cast<ConstantFP>(&C)) - EntryBuilder->buildFConstant(Reg, *CF); - else if (isa<UndefValue>(C)) - EntryBuilder->buildUndef(Reg); - else if (isa<ConstantPointerNull>(C)) - EntryBuilder->buildConstant(Reg, 0); - else if (auto GV = dyn_cast<GlobalValue>(&C)) - EntryBuilder->buildGlobalValue(Reg, GV); - else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { - if (!CAZ->getType()->isVectorTy()) - return false; - // Return the scalar if it is a <1 x Ty> vector. - if (CAZ->getNumElements() == 1) - return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); - SmallVector<Register, 4> Ops; - for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { - Constant &Elt = *CAZ->getElementValue(i); - Ops.push_back(getOrCreateVReg(Elt)); - } - EntryBuilder->buildBuildVector(Reg, Ops); - } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { - // Return the scalar if it is a <1 x Ty> vector. 
- if (CV->getNumElements() == 1) - return translateCopy(C, *CV->getElementAsConstant(0), - *EntryBuilder.get()); - SmallVector<Register, 4> Ops; - for (unsigned i = 0; i < CV->getNumElements(); ++i) { - Constant &Elt = *CV->getElementAsConstant(i); - Ops.push_back(getOrCreateVReg(Elt)); - } - EntryBuilder->buildBuildVector(Reg, Ops); - } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { - switch(CE->getOpcode()) { -#define HANDLE_INST(NUM, OPCODE, CLASS) \ - case Instruction::OPCODE: \ - return translate##OPCODE(*CE, *EntryBuilder.get()); -#include "llvm/IR/Instruction.def" - default: - return false; - } - } else if (auto CV = dyn_cast<ConstantVector>(&C)) { - if (CV->getNumOperands() == 1) - return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); - SmallVector<Register, 4> Ops; - for (unsigned i = 0; i < CV->getNumOperands(); ++i) { - Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); - } - EntryBuilder->buildBuildVector(Reg, Ops); - } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { - EntryBuilder->buildBlockAddress(Reg, BA); - } else - return false; - - return true; -} - -void IRTranslator::finalizeBasicBlock() { + else + EntryBuilder->setDebugLoc(DebugLoc()); + + auto &TLI = *MF->getSubtarget().getTargetLowering(); + if (TLI.fallBackToDAGISel(Inst)) + return false; + + switch (Inst.getOpcode()) { +#define HANDLE_INST(NUM, OPCODE, CLASS) \ + case Instruction::OPCODE: \ + return translate##OPCODE(Inst, *CurBuilder.get()); +#include "llvm/IR/Instruction.def" + default: + return false; + } +} + +bool IRTranslator::translate(const Constant &C, Register Reg) { + if (auto CI = dyn_cast<ConstantInt>(&C)) + EntryBuilder->buildConstant(Reg, *CI); + else if (auto CF = dyn_cast<ConstantFP>(&C)) + EntryBuilder->buildFConstant(Reg, *CF); + else if (isa<UndefValue>(C)) + EntryBuilder->buildUndef(Reg); + else if (isa<ConstantPointerNull>(C)) + EntryBuilder->buildConstant(Reg, 0); + else if (auto GV = dyn_cast<GlobalValue>(&C)) + EntryBuilder->buildGlobalValue(Reg, GV); + else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { + if (!CAZ->getType()->isVectorTy()) + return false; + // Return the scalar if it is a <1 x Ty> vector. + if (CAZ->getNumElements() == 1) + return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { + Constant &Elt = *CAZ->getElementValue(i); + Ops.push_back(getOrCreateVReg(Elt)); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { + // Return the scalar if it is a <1 x Ty> vector. 
+ if (CV->getNumElements() == 1) + return translateCopy(C, *CV->getElementAsConstant(0), + *EntryBuilder.get()); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CV->getNumElements(); ++i) { + Constant &Elt = *CV->getElementAsConstant(i); + Ops.push_back(getOrCreateVReg(Elt)); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { + switch(CE->getOpcode()) { +#define HANDLE_INST(NUM, OPCODE, CLASS) \ + case Instruction::OPCODE: \ + return translate##OPCODE(*CE, *EntryBuilder.get()); +#include "llvm/IR/Instruction.def" + default: + return false; + } + } else if (auto CV = dyn_cast<ConstantVector>(&C)) { + if (CV->getNumOperands() == 1) + return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); + SmallVector<Register, 4> Ops; + for (unsigned i = 0; i < CV->getNumOperands(); ++i) { + Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); + } + EntryBuilder->buildBuildVector(Reg, Ops); + } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { + EntryBuilder->buildBlockAddress(Reg, BA); + } else + return false; + + return true; +} + +void IRTranslator::finalizeBasicBlock() { for (auto &BTB : SL->BitTestCases) { // Emit header first, if it wasn't already emitted. if (!BTB.Emitted) @@ -2985,83 +2985,83 @@ void IRTranslator::finalizeBasicBlock() { } SL->BitTestCases.clear(); - for (auto &JTCase : SL->JTCases) { - // Emit header first, if it wasn't already emitted. - if (!JTCase.first.Emitted) - emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); - - emitJumpTable(JTCase.second, JTCase.second.MBB); - } - SL->JTCases.clear(); + for (auto &JTCase : SL->JTCases) { + // Emit header first, if it wasn't already emitted. + if (!JTCase.first.Emitted) + emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); + + emitJumpTable(JTCase.second, JTCase.second.MBB); + } + SL->JTCases.clear(); for (auto &SwCase : SL->SwitchCases) emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder); SL->SwitchCases.clear(); -} - -void IRTranslator::finalizeFunction() { - // Release the memory used by the different maps we - // needed during the translation. - PendingPHIs.clear(); - VMap.reset(); - FrameIndices.clear(); - MachinePreds.clear(); - // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it - // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid - // destroying it twice (in ~IRTranslator() and ~LLVMContext()) - EntryBuilder.reset(); - CurBuilder.reset(); - FuncInfo.clear(); -} - -/// Returns true if a BasicBlock \p BB within a variadic function contains a -/// variadic musttail call. -static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { - if (!IsVarArg) - return false; - - // Walk the block backwards, because tail calls usually only appear at the end - // of a block. - return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { - const auto *CI = dyn_cast<CallInst>(&I); - return CI && CI->isMustTailCall(); - }); -} - -bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { - MF = &CurMF; - const Function &F = MF->getFunction(); - if (F.empty()) - return false; - GISelCSEAnalysisWrapper &Wrapper = - getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); - // Set the CSEConfig and run the analysis. - GISelCSEInfo *CSEInfo = nullptr; - TPC = &getAnalysis<TargetPassConfig>(); - bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() - ? 
EnableCSEInIRTranslator - : TPC->isGISelCSEEnabled(); - - if (EnableCSE) { - EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); - CSEInfo = &Wrapper.get(TPC->getCSEConfig()); - EntryBuilder->setCSEInfo(CSEInfo); - CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); - CurBuilder->setCSEInfo(CSEInfo); - } else { - EntryBuilder = std::make_unique<MachineIRBuilder>(); - CurBuilder = std::make_unique<MachineIRBuilder>(); - } - CLI = MF->getSubtarget().getCallLowering(); - CurBuilder->setMF(*MF); - EntryBuilder->setMF(*MF); - MRI = &MF->getRegInfo(); - DL = &F.getParent()->getDataLayout(); - ORE = std::make_unique<OptimizationRemarkEmitter>(&F); +} + +void IRTranslator::finalizeFunction() { + // Release the memory used by the different maps we + // needed during the translation. + PendingPHIs.clear(); + VMap.reset(); + FrameIndices.clear(); + MachinePreds.clear(); + // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it + // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid + // destroying it twice (in ~IRTranslator() and ~LLVMContext()) + EntryBuilder.reset(); + CurBuilder.reset(); + FuncInfo.clear(); +} + +/// Returns true if a BasicBlock \p BB within a variadic function contains a +/// variadic musttail call. +static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { + if (!IsVarArg) + return false; + + // Walk the block backwards, because tail calls usually only appear at the end + // of a block. + return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { + const auto *CI = dyn_cast<CallInst>(&I); + return CI && CI->isMustTailCall(); + }); +} + +bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { + MF = &CurMF; + const Function &F = MF->getFunction(); + if (F.empty()) + return false; + GISelCSEAnalysisWrapper &Wrapper = + getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); + // Set the CSEConfig and run the analysis. + GISelCSEInfo *CSEInfo = nullptr; + TPC = &getAnalysis<TargetPassConfig>(); + bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() + ? EnableCSEInIRTranslator + : TPC->isGISelCSEEnabled(); + + if (EnableCSE) { + EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); + CSEInfo = &Wrapper.get(TPC->getCSEConfig()); + EntryBuilder->setCSEInfo(CSEInfo); + CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); + CurBuilder->setCSEInfo(CSEInfo); + } else { + EntryBuilder = std::make_unique<MachineIRBuilder>(); + CurBuilder = std::make_unique<MachineIRBuilder>(); + } + CLI = MF->getSubtarget().getCallLowering(); + CurBuilder->setMF(*MF); + EntryBuilder->setMF(*MF); + MRI = &MF->getRegInfo(); + DL = &F.getParent()->getDataLayout(); + ORE = std::make_unique<OptimizationRemarkEmitter>(&F); const TargetMachine &TM = MF->getTarget(); TM.resetTargetOptions(F); EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F); - FuncInfo.MF = MF; + FuncInfo.MF = MF; if (EnableOpts) FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI(); else @@ -3069,176 +3069,176 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF); - const auto &TLI = *MF->getSubtarget().getTargetLowering(); - - SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); - SL->init(TLI, TM, *DL); - - - - assert(PendingPHIs.empty() && "stale PHIs"); - - if (!DL->isLittleEndian()) { - // Currently we don't properly handle big endian code. 
- OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", - F.getSubprogram(), &F.getEntryBlock()); - R << "unable to translate in big endian mode"; - reportTranslationError(*MF, *TPC, *ORE, R); - } - - // Release the per-function state when we return, whether we succeeded or not. - auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); - - // Setup a separate basic-block for the arguments and constants - MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); - MF->push_back(EntryBB); - EntryBuilder->setMBB(*EntryBB); - - DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc(); - SwiftError.setFunction(CurMF); - SwiftError.createEntriesInEntryBlock(DbgLoc); - - bool IsVarArg = F.isVarArg(); - bool HasMustTailInVarArgFn = false; - - // Create all blocks, in IR order, to preserve the layout. - for (const BasicBlock &BB: F) { - auto *&MBB = BBToMBB[&BB]; - - MBB = MF->CreateMachineBasicBlock(&BB); - MF->push_back(MBB); - - if (BB.hasAddressTaken()) - MBB->setHasAddressTaken(); - - if (!HasMustTailInVarArgFn) - HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB); - } - - MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn); - - // Make our arguments/constants entry block fallthrough to the IR entry block. - EntryBB->addSuccessor(&getMBB(F.front())); - - if (CLI->fallBackToDAGISel(F)) { - OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", - F.getSubprogram(), &F.getEntryBlock()); - R << "unable to lower function: " << ore::NV("Prototype", F.getType()); - reportTranslationError(*MF, *TPC, *ORE, R); - return false; - } - - // Lower the actual args into this basic block. - SmallVector<ArrayRef<Register>, 8> VRegArgs; - for (const Argument &Arg: F.args()) { - if (DL->getTypeStoreSize(Arg.getType()).isZero()) - continue; // Don't handle zero sized types. - ArrayRef<Register> VRegs = getOrCreateVRegs(Arg); - VRegArgs.push_back(VRegs); - - if (Arg.hasSwiftErrorAttr()) { - assert(VRegs.size() == 1 && "Too many vregs for Swift error"); - SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]); - } - } - + const auto &TLI = *MF->getSubtarget().getTargetLowering(); + + SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); + SL->init(TLI, TM, *DL); + + + + assert(PendingPHIs.empty() && "stale PHIs"); + + if (!DL->isLittleEndian()) { + // Currently we don't properly handle big endian code. + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + F.getSubprogram(), &F.getEntryBlock()); + R << "unable to translate in big endian mode"; + reportTranslationError(*MF, *TPC, *ORE, R); + } + + // Release the per-function state when we return, whether we succeeded or not. + auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); + + // Setup a separate basic-block for the arguments and constants + MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); + MF->push_back(EntryBB); + EntryBuilder->setMBB(*EntryBB); + + DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc(); + SwiftError.setFunction(CurMF); + SwiftError.createEntriesInEntryBlock(DbgLoc); + + bool IsVarArg = F.isVarArg(); + bool HasMustTailInVarArgFn = false; + + // Create all blocks, in IR order, to preserve the layout. 
+ for (const BasicBlock &BB: F) { + auto *&MBB = BBToMBB[&BB]; + + MBB = MF->CreateMachineBasicBlock(&BB); + MF->push_back(MBB); + + if (BB.hasAddressTaken()) + MBB->setHasAddressTaken(); + + if (!HasMustTailInVarArgFn) + HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB); + } + + MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn); + + // Make our arguments/constants entry block fallthrough to the IR entry block. + EntryBB->addSuccessor(&getMBB(F.front())); + + if (CLI->fallBackToDAGISel(F)) { + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + F.getSubprogram(), &F.getEntryBlock()); + R << "unable to lower function: " << ore::NV("Prototype", F.getType()); + reportTranslationError(*MF, *TPC, *ORE, R); + return false; + } + + // Lower the actual args into this basic block. + SmallVector<ArrayRef<Register>, 8> VRegArgs; + for (const Argument &Arg: F.args()) { + if (DL->getTypeStoreSize(Arg.getType()).isZero()) + continue; // Don't handle zero sized types. + ArrayRef<Register> VRegs = getOrCreateVRegs(Arg); + VRegArgs.push_back(VRegs); + + if (Arg.hasSwiftErrorAttr()) { + assert(VRegs.size() == 1 && "Too many vregs for Swift error"); + SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]); + } + } + if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) { - OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", - F.getSubprogram(), &F.getEntryBlock()); - R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); - reportTranslationError(*MF, *TPC, *ORE, R); - return false; - } - - // Need to visit defs before uses when translating instructions. - GISelObserverWrapper WrapperObserver; - if (EnableCSE && CSEInfo) - WrapperObserver.addObserver(CSEInfo); - { - ReversePostOrderTraversal<const Function *> RPOT(&F); -#ifndef NDEBUG - DILocationVerifier Verifier; - WrapperObserver.addObserver(&Verifier); -#endif // ifndef NDEBUG - RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); - RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver); - for (const BasicBlock *BB : RPOT) { - MachineBasicBlock &MBB = getMBB(*BB); - // Set the insertion point of all the following translations to - // the end of this basic block. - CurBuilder->setMBB(MBB); - HasTailCall = false; - for (const Instruction &Inst : *BB) { - // If we translated a tail call in the last step, then we know - // everything after the call is either a return, or something that is - // handled by the call itself. (E.g. a lifetime marker or assume - // intrinsic.) In this case, we should stop translating the block and - // move on. - if (HasTailCall) - break; -#ifndef NDEBUG - Verifier.setCurrentInst(&Inst); -#endif // ifndef NDEBUG - if (translate(Inst)) - continue; - - OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", - Inst.getDebugLoc(), BB); - R << "unable to translate instruction: " << ore::NV("Opcode", &Inst); - - if (ORE->allowExtraAnalysis("gisel-irtranslator")) { - std::string InstStrStorage; - raw_string_ostream InstStr(InstStrStorage); - InstStr << Inst; - - R << ": '" << InstStr.str() << "'"; - } - - reportTranslationError(*MF, *TPC, *ORE, R); - return false; - } - - finalizeBasicBlock(); - } -#ifndef NDEBUG - WrapperObserver.removeObserver(&Verifier); -#endif - } - - finishPendingPhis(); - - SwiftError.propagateVRegs(); - - // Merge the argument lowering and constants block with its single - // successor, the LLVM-IR entry block. We want the basic block to - // be maximal. 
- assert(EntryBB->succ_size() == 1 && - "Custom BB used for lowering should have only one successor"); - // Get the successor of the current entry block. - MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin(); - assert(NewEntryBB.pred_size() == 1 && - "LLVM-IR entry block has a predecessor!?"); - // Move all the instruction from the current entry block to the - // new entry block. - NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(), - EntryBB->end()); - - // Update the live-in information for the new entry block. - for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins()) - NewEntryBB.addLiveIn(LiveIn); - NewEntryBB.sortUniqueLiveIns(); - - // Get rid of the now empty basic block. - EntryBB->removeSuccessor(&NewEntryBB); - MF->remove(EntryBB); - MF->DeleteMachineBasicBlock(EntryBB); - - assert(&MF->front() == &NewEntryBB && - "New entry wasn't next in the list of basic block!"); - - // Initialize stack protector information. - StackProtector &SP = getAnalysis<StackProtector>(); - SP.copyToMachineFrameInfo(MF->getFrameInfo()); - - return false; -} + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + F.getSubprogram(), &F.getEntryBlock()); + R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); + reportTranslationError(*MF, *TPC, *ORE, R); + return false; + } + + // Need to visit defs before uses when translating instructions. + GISelObserverWrapper WrapperObserver; + if (EnableCSE && CSEInfo) + WrapperObserver.addObserver(CSEInfo); + { + ReversePostOrderTraversal<const Function *> RPOT(&F); +#ifndef NDEBUG + DILocationVerifier Verifier; + WrapperObserver.addObserver(&Verifier); +#endif // ifndef NDEBUG + RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); + RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver); + for (const BasicBlock *BB : RPOT) { + MachineBasicBlock &MBB = getMBB(*BB); + // Set the insertion point of all the following translations to + // the end of this basic block. + CurBuilder->setMBB(MBB); + HasTailCall = false; + for (const Instruction &Inst : *BB) { + // If we translated a tail call in the last step, then we know + // everything after the call is either a return, or something that is + // handled by the call itself. (E.g. a lifetime marker or assume + // intrinsic.) In this case, we should stop translating the block and + // move on. + if (HasTailCall) + break; +#ifndef NDEBUG + Verifier.setCurrentInst(&Inst); +#endif // ifndef NDEBUG + if (translate(Inst)) + continue; + + OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", + Inst.getDebugLoc(), BB); + R << "unable to translate instruction: " << ore::NV("Opcode", &Inst); + + if (ORE->allowExtraAnalysis("gisel-irtranslator")) { + std::string InstStrStorage; + raw_string_ostream InstStr(InstStrStorage); + InstStr << Inst; + + R << ": '" << InstStr.str() << "'"; + } + + reportTranslationError(*MF, *TPC, *ORE, R); + return false; + } + + finalizeBasicBlock(); + } +#ifndef NDEBUG + WrapperObserver.removeObserver(&Verifier); +#endif + } + + finishPendingPhis(); + + SwiftError.propagateVRegs(); + + // Merge the argument lowering and constants block with its single + // successor, the LLVM-IR entry block. We want the basic block to + // be maximal. + assert(EntryBB->succ_size() == 1 && + "Custom BB used for lowering should have only one successor"); + // Get the successor of the current entry block. 
+ MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin(); + assert(NewEntryBB.pred_size() == 1 && + "LLVM-IR entry block has a predecessor!?"); + // Move all the instruction from the current entry block to the + // new entry block. + NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(), + EntryBB->end()); + + // Update the live-in information for the new entry block. + for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins()) + NewEntryBB.addLiveIn(LiveIn); + NewEntryBB.sortUniqueLiveIns(); + + // Get rid of the now empty basic block. + EntryBB->removeSuccessor(&NewEntryBB); + MF->remove(EntryBB); + MF->DeleteMachineBasicBlock(EntryBB); + + assert(&MF->front() == &NewEntryBB && + "New entry wasn't next in the list of basic block!"); + + // Initialize stack protector information. + StackProtector &SP = getAnalysis<StackProtector>(); + SP.copyToMachineFrameInfo(MF->getFrameInfo()); + + return false; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp index ce6827bc01..bb4d41cfd6 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp @@ -1,677 +1,677 @@ -//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM -/// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" -#include "llvm/CodeGen/Analysis.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/IR/DataLayout.h" -#include "llvm/IR/Instructions.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Module.h" - -#define DEBUG_TYPE "inline-asm-lowering" - -using namespace llvm; - -void InlineAsmLowering::anchor() {} - -namespace { - -/// GISelAsmOperandInfo - This contains information for each constraint that we -/// are lowering. -class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { -public: - /// Regs - If this is a register or register class operand, this - /// contains the set of assigned registers corresponding to the operand. - SmallVector<Register, 1> Regs; - - explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info) - : TargetLowering::AsmOperandInfo(Info) {} -}; - -using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>; - -class ExtraFlags { - unsigned Flags = 0; - -public: - explicit ExtraFlags(const CallBase &CB) { - const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand()); - if (IA->hasSideEffects()) - Flags |= InlineAsm::Extra_HasSideEffects; - if (IA->isAlignStack()) - Flags |= InlineAsm::Extra_IsAlignStack; - if (CB.isConvergent()) - Flags |= InlineAsm::Extra_IsConvergent; - Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; - } - - void update(const TargetLowering::AsmOperandInfo &OpInfo) { - // Ideally, we would only check against memory constraints. 
However, the - // meaning of an Other constraint can be target-specific and we can't easily - // reason about it. Therefore, be conservative and set MayLoad/MayStore - // for Other constraints as well. - if (OpInfo.ConstraintType == TargetLowering::C_Memory || - OpInfo.ConstraintType == TargetLowering::C_Other) { - if (OpInfo.Type == InlineAsm::isInput) - Flags |= InlineAsm::Extra_MayLoad; - else if (OpInfo.Type == InlineAsm::isOutput) - Flags |= InlineAsm::Extra_MayStore; - else if (OpInfo.Type == InlineAsm::isClobber) - Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); - } - } - - unsigned get() const { return Flags; } -}; - -} // namespace - -/// Assign virtual/physical registers for the specified register operand. -static void getRegistersForValue(MachineFunction &MF, - MachineIRBuilder &MIRBuilder, - GISelAsmOperandInfo &OpInfo, - GISelAsmOperandInfo &RefOpInfo) { - - const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); - const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); - - // No work to do for memory operations. - if (OpInfo.ConstraintType == TargetLowering::C_Memory) - return; - - // If this is a constraint for a single physreg, or a constraint for a - // register class, find it. - Register AssignedReg; - const TargetRegisterClass *RC; - std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint( - &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT); - // RC is unset only on failure. Return immediately. - if (!RC) - return; - - // No need to allocate a matching input constraint since the constraint it's - // matching to has already been allocated. - if (OpInfo.isMatchingInputConstraint()) - return; - - // Initialize NumRegs. - unsigned NumRegs = 1; - if (OpInfo.ConstraintVT != MVT::Other) - NumRegs = - TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT); - - // If this is a constraint for a specific physical register, but the type of - // the operand requires more than one register to be passed, we allocate the - // required amount of physical registers, starting from the selected physical - // register. - // For this, first retrieve a register iterator for the given register class - TargetRegisterClass::iterator I = RC->begin(); - MachineRegisterInfo &RegInfo = MF.getRegInfo(); - - // Advance the iterator to the assigned register (if set) - if (AssignedReg) { - for (; *I != AssignedReg; ++I) - assert(I != RC->end() && "AssignedReg should be a member of provided RC"); - } - - // Finally, assign the registers. If the AssignedReg isn't set, create virtual - // registers with the provided register class - for (; NumRegs; --NumRegs, ++I) { - assert(I != RC->end() && "Ran out of registers to allocate!"); - Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC); - OpInfo.Regs.push_back(R); - } -} - -/// Return an integer indicating how general CT is. 
-static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { - switch (CT) { - case TargetLowering::C_Immediate: - case TargetLowering::C_Other: - case TargetLowering::C_Unknown: - return 0; - case TargetLowering::C_Register: - return 1; - case TargetLowering::C_RegisterClass: - return 2; - case TargetLowering::C_Memory: - return 3; - } - llvm_unreachable("Invalid constraint type"); -} - -static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, - const TargetLowering *TLI) { - assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); - unsigned BestIdx = 0; - TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; - int BestGenerality = -1; - - // Loop over the options, keeping track of the most general one. - for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { - TargetLowering::ConstraintType CType = - TLI->getConstraintType(OpInfo.Codes[i]); - - // Indirect 'other' or 'immediate' constraints are not allowed. - if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || - CType == TargetLowering::C_Register || - CType == TargetLowering::C_RegisterClass)) - continue; - - // If this is an 'other' or 'immediate' constraint, see if the operand is - // valid for it. For example, on X86 we might have an 'rI' constraint. If - // the operand is an integer in the range [0..31] we want to use I (saving a - // load of a register), otherwise we must use 'r'. - if (CType == TargetLowering::C_Other || - CType == TargetLowering::C_Immediate) { - assert(OpInfo.Codes[i].size() == 1 && - "Unhandled multi-letter 'other' constraint"); - // FIXME: prefer immediate constraints if the target allows it - } - - // Things with matching constraints can only be registers, per gcc - // documentation. This mainly affects "g" constraints. - if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) - continue; - - // This constraint letter is more general than the previous one, use it. - int Generality = getConstraintGenerality(CType); - if (Generality > BestGenerality) { - BestType = CType; - BestIdx = i; - BestGenerality = Generality; - } - } - - OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; - OpInfo.ConstraintType = BestType; -} - -static void computeConstraintToUse(const TargetLowering *TLI, - TargetLowering::AsmOperandInfo &OpInfo) { - assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); - - // Single-letter constraints ('r') are very common. - if (OpInfo.Codes.size() == 1) { - OpInfo.ConstraintCode = OpInfo.Codes[0]; - OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode); - } else { - chooseConstraint(OpInfo, TLI); - } - - // 'X' matches anything. - if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { - // Labels and constants are handled elsewhere ('X' is the only thing - // that matches labels). For Functions, the type here is the type of - // the result, which is not what we want to look at; leave them alone. - Value *Val = OpInfo.CallOperandVal; - if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val)) - return; - - // Otherwise, try to resolve it to something we know about by looking at - // the actual operand type. 
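// For reference, the default TargetLowering::LowerXConstraint used in the
// call below resolves 'X' from the operand's value type roughly like this
// (a simplified sketch; targets are free to override it):
//
//   const char *LowerXConstraint(EVT ConstraintVT) const {
//     if (ConstraintVT.isInteger())
//       return "r";    // integers become a general register constraint
//     if (ConstraintVT.isFloatingPoint())
//       return "f";    // floating point becomes an FP register constraint
//     return nullptr;  // anything else leaves 'X' unresolved
//   }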
- if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) { - OpInfo.ConstraintCode = Repl; - OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode); - } - } -} - -static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) { - unsigned Flag = I.getOperand(OpIdx).getImm(); - return InlineAsm::getNumOperandRegisters(Flag); -} - -static bool buildAnyextOrCopy(Register Dst, Register Src, - MachineIRBuilder &MIRBuilder) { - const TargetRegisterInfo *TRI = - MIRBuilder.getMF().getSubtarget().getRegisterInfo(); - MachineRegisterInfo *MRI = MIRBuilder.getMRI(); - - auto SrcTy = MRI->getType(Src); - if (!SrcTy.isValid()) { - LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n"); - return false; - } - unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI); - unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI); - - if (DstSize < SrcSize) { - LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n"); - return false; - } - - // Attempt to anyext small scalar sources. - if (DstSize > SrcSize) { - if (!SrcTy.isScalar()) { - LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of" - "destination register class\n"); - return false; - } - Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0); - } - - MIRBuilder.buildCopy(Dst, Src); - return true; -} - -bool InlineAsmLowering::lowerInlineAsm( - MachineIRBuilder &MIRBuilder, const CallBase &Call, - std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs) - const { - const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); - - /// ConstraintOperands - Information about all of the constraints. - GISelAsmOperandInfoVector ConstraintOperands; - - MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = MF.getFunction(); - const DataLayout &DL = F.getParent()->getDataLayout(); - const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); - - MachineRegisterInfo *MRI = MIRBuilder.getMRI(); - - TargetLowering::AsmOperandInfoVector TargetConstraints = - TLI->ParseConstraints(DL, TRI, Call); - - ExtraFlags ExtraInfo(Call); - unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. - unsigned ResNo = 0; // ResNo - The result number of the next output. - for (auto &T : TargetConstraints) { - ConstraintOperands.push_back(GISelAsmOperandInfo(T)); - GISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); - - // Compute the value type for each operand. - if (OpInfo.Type == InlineAsm::isInput || - (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) { - - OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++)); - - if (isa<BasicBlock>(OpInfo.CallOperandVal)) { - LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n"); - return false; - } - - Type *OpTy = OpInfo.CallOperandVal->getType(); - - // If this is an indirect operand, the operand is a pointer to the - // accessed type. 
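// Example IR for the indirect case handled below (hypothetical, for
// illustration only):
//
//   call void asm "movl $1, $0", "=*m,r"(i32* %slot, i32 %v)
//
// Here the first operand is indirect ("=*m"): its IR type is i32*, while
// the type the asm actually accesses is the pointee type i32, which is
// what ConstraintVT must end up describing.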
- if (OpInfo.isIndirect) { - PointerType *PtrTy = dyn_cast<PointerType>(OpTy); - if (!PtrTy) - report_fatal_error("Indirect operand for inline asm not a pointer!"); - OpTy = PtrTy->getElementType(); - } - - // FIXME: Support aggregate input operands - if (!OpTy->isSingleValueType()) { - LLVM_DEBUG( - dbgs() << "Aggregate input operands are not supported yet\n"); - return false; - } - - OpInfo.ConstraintVT = TLI->getValueType(DL, OpTy, true).getSimpleVT(); - - } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) { - assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); - if (StructType *STy = dyn_cast<StructType>(Call.getType())) { - OpInfo.ConstraintVT = - TLI->getSimpleValueType(DL, STy->getElementType(ResNo)); - } else { - assert(ResNo == 0 && "Asm only has one result!"); - OpInfo.ConstraintVT = TLI->getSimpleValueType(DL, Call.getType()); - } - ++ResNo; - } else { - OpInfo.ConstraintVT = MVT::Other; - } - - // Compute the constraint code and ConstraintType to use. - computeConstraintToUse(TLI, OpInfo); - - // The selected constraint type might expose new sideeffects - ExtraInfo.update(OpInfo); - } - - // At this point, all operand types are decided. - // Create the MachineInstr, but don't insert it yet since input - // operands still need to insert instructions before this one - auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM) - .addExternalSymbol(IA->getAsmString().c_str()) - .addImm(ExtraInfo.get()); - - // Starting from this operand: flag followed by register(s) will be added as - // operands to Inst for each constraint. Used for matching input constraints. - unsigned StartIdx = Inst->getNumOperands(); - - // Collects the output operands for later processing - GISelAsmOperandInfoVector OutputOperands; - - for (auto &OpInfo : ConstraintOperands) { - GISelAsmOperandInfo &RefOpInfo = - OpInfo.isMatchingInputConstraint() - ? ConstraintOperands[OpInfo.getMatchedOperand()] - : OpInfo; - - // Assign registers for register operands - getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo); - - switch (OpInfo.Type) { - case InlineAsm::isOutput: - if (OpInfo.ConstraintType == TargetLowering::C_Memory) { - unsigned ConstraintID = - TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode); - assert(ConstraintID != InlineAsm::Constraint_Unknown && - "Failed to convert memory constraint code to constraint id."); - - // Add information to the INLINEASM instruction to know about this - // output. - unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); - OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); - Inst.addImm(OpFlags); - ArrayRef<Register> SourceRegs = - GetOrCreateVRegs(*OpInfo.CallOperandVal); - assert( - SourceRegs.size() == 1 && - "Expected the memory output to fit into a single virtual register"); - Inst.addReg(SourceRegs[0]); - } else { - // Otherwise, this outputs to a register (directly for C_Register / - // C_RegisterClass. Find a register that we can use. - assert(OpInfo.ConstraintType == TargetLowering::C_Register || - OpInfo.ConstraintType == TargetLowering::C_RegisterClass); - - if (OpInfo.Regs.empty()) { - LLVM_DEBUG(dbgs() - << "Couldn't allocate output register for constraint\n"); - return false; - } - - // Add information to the INLINEASM instruction to know that this - // register is set. - unsigned Flag = InlineAsm::getFlagWord( - OpInfo.isEarlyClobber ? 
InlineAsm::Kind_RegDefEarlyClobber - : InlineAsm::Kind_RegDef, - OpInfo.Regs.size()); - if (OpInfo.Regs.front().isVirtual()) { - // Put the register class of the virtual registers in the flag word. - // That way, later passes can recompute register class constraints for - // inline assembly as well as normal instructions. Don't do this for - // tied operands that can use the regclass information from the def. - const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front()); - Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); - } - - Inst.addImm(Flag); - - for (Register Reg : OpInfo.Regs) { - Inst.addReg(Reg, - RegState::Define | getImplRegState(Reg.isPhysical()) | - (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0)); - } - - // Remember this output operand for later processing - OutputOperands.push_back(OpInfo); - } - - break; - case InlineAsm::isInput: { - if (OpInfo.isMatchingInputConstraint()) { - unsigned DefIdx = OpInfo.getMatchedOperand(); - // Find operand with register def that corresponds to DefIdx. - unsigned InstFlagIdx = StartIdx; - for (unsigned i = 0; i < DefIdx; ++i) - InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1; - assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag"); - - unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm(); - if (InlineAsm::isMemKind(MatchedOperandFlag)) { - LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not " - "supported. This should be target specific.\n"); - return false; - } - if (!InlineAsm::isRegDefKind(MatchedOperandFlag) && - !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) { - LLVM_DEBUG(dbgs() << "Unknown matching constraint\n"); - return false; - } - - // We want to tie input to register in next operand. - unsigned DefRegIdx = InstFlagIdx + 1; - Register Def = Inst->getOperand(DefRegIdx).getReg(); - - ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); - assert(SrcRegs.size() == 1 && "Single register is expected here"); - - // When Def is physreg: use given input. - Register In = SrcRegs[0]; - // When Def is vreg: copy input to new vreg with same reg class as Def. - if (Def.isVirtual()) { - In = MRI->createVirtualRegister(MRI->getRegClass(Def)); - if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder)) - return false; - } - - // Add Flag and input register operand (In) to Inst. Tie In to Def. - unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1); - unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx); - Inst.addImm(Flag); - Inst.addReg(In); - Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1); - break; - } - - if (OpInfo.ConstraintType == TargetLowering::C_Other && - OpInfo.isIndirect) { - LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint " - "not supported yet\n"); - return false; - } - - if (OpInfo.ConstraintType == TargetLowering::C_Immediate || - OpInfo.ConstraintType == TargetLowering::C_Other) { - - std::vector<MachineOperand> Ops; - if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal, - OpInfo.ConstraintCode, Ops, - MIRBuilder)) { - LLVM_DEBUG(dbgs() << "Don't support constraint: " - << OpInfo.ConstraintCode << " yet\n"); - return false; - } - - assert(Ops.size() > 0 && - "Expected constraint to be lowered to at least one operand"); - - // Add information to the INLINEASM node to know about this input. 
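// The immediate added below is an operand "flag word"; as defined by
// InlineAsm::getFlagWord and friends, its layout is roughly:
//
//   bits 2-0    operand kind (Kind_Imm, Kind_RegUse, Kind_Mem, ...)
//   bits 15-3   number of operands that follow this flag
//   bits 30-16  extra data: register class ID + 1, a memory Constraint_*
//               value, or (when bit 31 is set) the matched operand number
//
// so, e.g., getFlagWord(InlineAsm::Kind_Imm, 2) describes "two immediate
// operands follow".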
- unsigned OpFlags = - InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size()); - Inst.addImm(OpFlags); - Inst.add(Ops); - break; - } - - if (OpInfo.ConstraintType == TargetLowering::C_Memory) { - - if (!OpInfo.isIndirect) { - LLVM_DEBUG(dbgs() - << "Cannot indirectify memory input operands yet\n"); - return false; - } - - assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); - - unsigned ConstraintID = - TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode); - unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); - OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); - Inst.addImm(OpFlags); - ArrayRef<Register> SourceRegs = - GetOrCreateVRegs(*OpInfo.CallOperandVal); - assert( - SourceRegs.size() == 1 && - "Expected the memory input to fit into a single virtual register"); - Inst.addReg(SourceRegs[0]); - break; - } - - assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass || - OpInfo.ConstraintType == TargetLowering::C_Register) && - "Unknown constraint type!"); - - if (OpInfo.isIndirect) { - LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet " - "for constraint '" - << OpInfo.ConstraintCode << "'\n"); - return false; - } - - // Copy the input into the appropriate registers. - if (OpInfo.Regs.empty()) { - LLVM_DEBUG( - dbgs() - << "Couldn't allocate input register for register constraint\n"); - return false; - } - - unsigned NumRegs = OpInfo.Regs.size(); - ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); - assert(NumRegs == SourceRegs.size() && - "Expected the number of input registers to match the number of " - "source registers"); - - if (NumRegs > 1) { - LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are " - "not supported yet\n"); - return false; - } - - unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs); +//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM +/// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" +#include "llvm/CodeGen/Analysis.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" + +#define DEBUG_TYPE "inline-asm-lowering" + +using namespace llvm; + +void InlineAsmLowering::anchor() {} + +namespace { + +/// GISelAsmOperandInfo - This contains information for each constraint that we +/// are lowering. +class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { +public: + /// Regs - If this is a register or register class operand, this + /// contains the set of assigned registers corresponding to the operand. 
+ SmallVector<Register, 1> Regs; + + explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info) + : TargetLowering::AsmOperandInfo(Info) {} +}; + +using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>; + +class ExtraFlags { + unsigned Flags = 0; + +public: + explicit ExtraFlags(const CallBase &CB) { + const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand()); + if (IA->hasSideEffects()) + Flags |= InlineAsm::Extra_HasSideEffects; + if (IA->isAlignStack()) + Flags |= InlineAsm::Extra_IsAlignStack; + if (CB.isConvergent()) + Flags |= InlineAsm::Extra_IsConvergent; + Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; + } + + void update(const TargetLowering::AsmOperandInfo &OpInfo) { + // Ideally, we would only check against memory constraints. However, the + // meaning of an Other constraint can be target-specific and we can't easily + // reason about it. Therefore, be conservative and set MayLoad/MayStore + // for Other constraints as well. + if (OpInfo.ConstraintType == TargetLowering::C_Memory || + OpInfo.ConstraintType == TargetLowering::C_Other) { + if (OpInfo.Type == InlineAsm::isInput) + Flags |= InlineAsm::Extra_MayLoad; + else if (OpInfo.Type == InlineAsm::isOutput) + Flags |= InlineAsm::Extra_MayStore; + else if (OpInfo.Type == InlineAsm::isClobber) + Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); + } + } + + unsigned get() const { return Flags; } +}; + +} // namespace + +/// Assign virtual/physical registers for the specified register operand. +static void getRegistersForValue(MachineFunction &MF, + MachineIRBuilder &MIRBuilder, + GISelAsmOperandInfo &OpInfo, + GISelAsmOperandInfo &RefOpInfo) { + + const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); + const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); + + // No work to do for memory operations. + if (OpInfo.ConstraintType == TargetLowering::C_Memory) + return; + + // If this is a constraint for a single physreg, or a constraint for a + // register class, find it. + Register AssignedReg; + const TargetRegisterClass *RC; + std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint( + &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT); + // RC is unset only on failure. Return immediately. + if (!RC) + return; + + // No need to allocate a matching input constraint since the constraint it's + // matching to has already been allocated. + if (OpInfo.isMatchingInputConstraint()) + return; + + // Initialize NumRegs. + unsigned NumRegs = 1; + if (OpInfo.ConstraintVT != MVT::Other) + NumRegs = + TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT); + + // If this is a constraint for a specific physical register, but the type of + // the operand requires more than one register to be passed, we allocate the + // required amount of physical registers, starting from the selected physical + // register. + // For this, first retrieve a register iterator for the given register class + TargetRegisterClass::iterator I = RC->begin(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + + // Advance the iterator to the assigned register (if set) + if (AssignedReg) { + for (; *I != AssignedReg; ++I) + assert(I != RC->end() && "AssignedReg should be a member of provided RC"); + } + + // Finally, assign the registers. If the AssignedReg isn't set, create virtual + // registers with the provided register class + for (; NumRegs; --NumRegs, ++I) { + assert(I != RC->end() && "Ran out of registers to allocate!"); + Register R = AssignedReg ? 
Register(*I) : RegInfo.createVirtualRegister(RC); + OpInfo.Regs.push_back(R); + } +} + +/// Return an integer indicating how general CT is. +static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { + switch (CT) { + case TargetLowering::C_Immediate: + case TargetLowering::C_Other: + case TargetLowering::C_Unknown: + return 0; + case TargetLowering::C_Register: + return 1; + case TargetLowering::C_RegisterClass: + return 2; + case TargetLowering::C_Memory: + return 3; + } + llvm_unreachable("Invalid constraint type"); +} + +static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, + const TargetLowering *TLI) { + assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); + unsigned BestIdx = 0; + TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; + int BestGenerality = -1; + + // Loop over the options, keeping track of the most general one. + for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { + TargetLowering::ConstraintType CType = + TLI->getConstraintType(OpInfo.Codes[i]); + + // Indirect 'other' or 'immediate' constraints are not allowed. + if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || + CType == TargetLowering::C_Register || + CType == TargetLowering::C_RegisterClass)) + continue; + + // If this is an 'other' or 'immediate' constraint, see if the operand is + // valid for it. For example, on X86 we might have an 'rI' constraint. If + // the operand is an integer in the range [0..31] we want to use I (saving a + // load of a register), otherwise we must use 'r'. + if (CType == TargetLowering::C_Other || + CType == TargetLowering::C_Immediate) { + assert(OpInfo.Codes[i].size() == 1 && + "Unhandled multi-letter 'other' constraint"); + // FIXME: prefer immediate constraints if the target allows it + } + + // Things with matching constraints can only be registers, per gcc + // documentation. This mainly affects "g" constraints. + if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) + continue; + + // This constraint letter is more general than the previous one, use it. + int Generality = getConstraintGenerality(CType); + if (Generality > BestGenerality) { + BestType = CType; + BestIdx = i; + BestGenerality = Generality; + } + } + + OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; + OpInfo.ConstraintType = BestType; +} + +static void computeConstraintToUse(const TargetLowering *TLI, + TargetLowering::AsmOperandInfo &OpInfo) { + assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); + + // Single-letter constraints ('r') are very common. + if (OpInfo.Codes.size() == 1) { + OpInfo.ConstraintCode = OpInfo.Codes[0]; + OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode); + } else { + chooseConstraint(OpInfo, TLI); + } + + // 'X' matches anything. + if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { + // Labels and constants are handled elsewhere ('X' is the only thing + // that matches labels). For Functions, the type here is the type of + // the result, which is not what we want to look at; leave them alone. + Value *Val = OpInfo.CallOperandVal; + if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val)) + return; + + // Otherwise, try to resolve it to something we know about by looking at + // the actual operand type. 
+ if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) { + OpInfo.ConstraintCode = Repl; + OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode); + } + } +} + +static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) { + unsigned Flag = I.getOperand(OpIdx).getImm(); + return InlineAsm::getNumOperandRegisters(Flag); +} + +static bool buildAnyextOrCopy(Register Dst, Register Src, + MachineIRBuilder &MIRBuilder) { + const TargetRegisterInfo *TRI = + MIRBuilder.getMF().getSubtarget().getRegisterInfo(); + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + + auto SrcTy = MRI->getType(Src); + if (!SrcTy.isValid()) { + LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n"); + return false; + } + unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI); + unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI); + + if (DstSize < SrcSize) { + LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n"); + return false; + } + + // Attempt to anyext small scalar sources. + if (DstSize > SrcSize) { + if (!SrcTy.isScalar()) { + LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of" + "destination register class\n"); + return false; + } + Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0); + } + + MIRBuilder.buildCopy(Dst, Src); + return true; +} + +bool InlineAsmLowering::lowerInlineAsm( + MachineIRBuilder &MIRBuilder, const CallBase &Call, + std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs) + const { + const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); + + /// ConstraintOperands - Information about all of the constraints. + GISelAsmOperandInfoVector ConstraintOperands; + + MachineFunction &MF = MIRBuilder.getMF(); + const Function &F = MF.getFunction(); + const DataLayout &DL = F.getParent()->getDataLayout(); + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + + TargetLowering::AsmOperandInfoVector TargetConstraints = + TLI->ParseConstraints(DL, TRI, Call); + + ExtraFlags ExtraInfo(Call); + unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. + unsigned ResNo = 0; // ResNo - The result number of the next output. + for (auto &T : TargetConstraints) { + ConstraintOperands.push_back(GISelAsmOperandInfo(T)); + GISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); + + // Compute the value type for each operand. + if (OpInfo.Type == InlineAsm::isInput || + (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) { + + OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++)); + + if (isa<BasicBlock>(OpInfo.CallOperandVal)) { + LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n"); + return false; + } + + Type *OpTy = OpInfo.CallOperandVal->getType(); + + // If this is an indirect operand, the operand is a pointer to the + // accessed type. 
+ if (OpInfo.isIndirect) { + PointerType *PtrTy = dyn_cast<PointerType>(OpTy); + if (!PtrTy) + report_fatal_error("Indirect operand for inline asm not a pointer!"); + OpTy = PtrTy->getElementType(); + } + + // FIXME: Support aggregate input operands + if (!OpTy->isSingleValueType()) { + LLVM_DEBUG( + dbgs() << "Aggregate input operands are not supported yet\n"); + return false; + } + + OpInfo.ConstraintVT = TLI->getValueType(DL, OpTy, true).getSimpleVT(); + + } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) { + assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); + if (StructType *STy = dyn_cast<StructType>(Call.getType())) { + OpInfo.ConstraintVT = + TLI->getSimpleValueType(DL, STy->getElementType(ResNo)); + } else { + assert(ResNo == 0 && "Asm only has one result!"); + OpInfo.ConstraintVT = TLI->getSimpleValueType(DL, Call.getType()); + } + ++ResNo; + } else { + OpInfo.ConstraintVT = MVT::Other; + } + + // Compute the constraint code and ConstraintType to use. + computeConstraintToUse(TLI, OpInfo); + + // The selected constraint type might expose new sideeffects + ExtraInfo.update(OpInfo); + } + + // At this point, all operand types are decided. + // Create the MachineInstr, but don't insert it yet since input + // operands still need to insert instructions before this one + auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM) + .addExternalSymbol(IA->getAsmString().c_str()) + .addImm(ExtraInfo.get()); + + // Starting from this operand: flag followed by register(s) will be added as + // operands to Inst for each constraint. Used for matching input constraints. + unsigned StartIdx = Inst->getNumOperands(); + + // Collects the output operands for later processing + GISelAsmOperandInfoVector OutputOperands; + + for (auto &OpInfo : ConstraintOperands) { + GISelAsmOperandInfo &RefOpInfo = + OpInfo.isMatchingInputConstraint() + ? ConstraintOperands[OpInfo.getMatchedOperand()] + : OpInfo; + + // Assign registers for register operands + getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo); + + switch (OpInfo.Type) { + case InlineAsm::isOutput: + if (OpInfo.ConstraintType == TargetLowering::C_Memory) { + unsigned ConstraintID = + TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode); + assert(ConstraintID != InlineAsm::Constraint_Unknown && + "Failed to convert memory constraint code to constraint id."); + + // Add information to the INLINEASM instruction to know about this + // output. + unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); + OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); + Inst.addImm(OpFlags); + ArrayRef<Register> SourceRegs = + GetOrCreateVRegs(*OpInfo.CallOperandVal); + assert( + SourceRegs.size() == 1 && + "Expected the memory output to fit into a single virtual register"); + Inst.addReg(SourceRegs[0]); + } else { + // Otherwise, this outputs to a register (directly for C_Register / + // C_RegisterClass. Find a register that we can use. + assert(OpInfo.ConstraintType == TargetLowering::C_Register || + OpInfo.ConstraintType == TargetLowering::C_RegisterClass); + + if (OpInfo.Regs.empty()) { + LLVM_DEBUG(dbgs() + << "Couldn't allocate output register for constraint\n"); + return false; + } + + // Add information to the INLINEASM instruction to know that this + // register is set. + unsigned Flag = InlineAsm::getFlagWord( + OpInfo.isEarlyClobber ? 
InlineAsm::Kind_RegDefEarlyClobber + : InlineAsm::Kind_RegDef, + OpInfo.Regs.size()); + if (OpInfo.Regs.front().isVirtual()) { + // Put the register class of the virtual registers in the flag word. + // That way, later passes can recompute register class constraints for + // inline assembly as well as normal instructions. Don't do this for + // tied operands that can use the regclass information from the def. + const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front()); + Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); + } + + Inst.addImm(Flag); + + for (Register Reg : OpInfo.Regs) { + Inst.addReg(Reg, + RegState::Define | getImplRegState(Reg.isPhysical()) | + (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0)); + } + + // Remember this output operand for later processing + OutputOperands.push_back(OpInfo); + } + + break; + case InlineAsm::isInput: { + if (OpInfo.isMatchingInputConstraint()) { + unsigned DefIdx = OpInfo.getMatchedOperand(); + // Find operand with register def that corresponds to DefIdx. + unsigned InstFlagIdx = StartIdx; + for (unsigned i = 0; i < DefIdx; ++i) + InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1; + assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag"); + + unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm(); + if (InlineAsm::isMemKind(MatchedOperandFlag)) { + LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not " + "supported. This should be target specific.\n"); + return false; + } + if (!InlineAsm::isRegDefKind(MatchedOperandFlag) && + !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) { + LLVM_DEBUG(dbgs() << "Unknown matching constraint\n"); + return false; + } + + // We want to tie input to register in next operand. + unsigned DefRegIdx = InstFlagIdx + 1; + Register Def = Inst->getOperand(DefRegIdx).getReg(); + + ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); + assert(SrcRegs.size() == 1 && "Single register is expected here"); + + // When Def is physreg: use given input. + Register In = SrcRegs[0]; + // When Def is vreg: copy input to new vreg with same reg class as Def. + if (Def.isVirtual()) { + In = MRI->createVirtualRegister(MRI->getRegClass(Def)); + if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder)) + return false; + } + + // Add Flag and input register operand (In) to Inst. Tie In to Def. + unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1); + unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx); + Inst.addImm(Flag); + Inst.addReg(In); + Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1); + break; + } + + if (OpInfo.ConstraintType == TargetLowering::C_Other && + OpInfo.isIndirect) { + LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint " + "not supported yet\n"); + return false; + } + + if (OpInfo.ConstraintType == TargetLowering::C_Immediate || + OpInfo.ConstraintType == TargetLowering::C_Other) { + + std::vector<MachineOperand> Ops; + if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal, + OpInfo.ConstraintCode, Ops, + MIRBuilder)) { + LLVM_DEBUG(dbgs() << "Don't support constraint: " + << OpInfo.ConstraintCode << " yet\n"); + return false; + } + + assert(Ops.size() > 0 && + "Expected constraint to be lowered to at least one operand"); + + // Add information to the INLINEASM node to know about this input. 
+ unsigned OpFlags = + InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size()); + Inst.addImm(OpFlags); + Inst.add(Ops); + break; + } + + if (OpInfo.ConstraintType == TargetLowering::C_Memory) { + + if (!OpInfo.isIndirect) { + LLVM_DEBUG(dbgs() + << "Cannot indirectify memory input operands yet\n"); + return false; + } + + assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); + + unsigned ConstraintID = + TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode); + unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); + OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); + Inst.addImm(OpFlags); + ArrayRef<Register> SourceRegs = + GetOrCreateVRegs(*OpInfo.CallOperandVal); + assert( + SourceRegs.size() == 1 && + "Expected the memory input to fit into a single virtual register"); + Inst.addReg(SourceRegs[0]); + break; + } + + assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass || + OpInfo.ConstraintType == TargetLowering::C_Register) && + "Unknown constraint type!"); + + if (OpInfo.isIndirect) { + LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet " + "for constraint '" + << OpInfo.ConstraintCode << "'\n"); + return false; + } + + // Copy the input into the appropriate registers. + if (OpInfo.Regs.empty()) { + LLVM_DEBUG( + dbgs() + << "Couldn't allocate input register for register constraint\n"); + return false; + } + + unsigned NumRegs = OpInfo.Regs.size(); + ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); + assert(NumRegs == SourceRegs.size() && + "Expected the number of input registers to match the number of " + "source registers"); + + if (NumRegs > 1) { + LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are " + "not supported yet\n"); + return false; + } + + unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs); if (OpInfo.Regs.front().isVirtual()) { // Put the register class of the virtual registers in the flag word. 
const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front()); Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); } - Inst.addImm(Flag); - if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder)) - return false; - Inst.addReg(OpInfo.Regs[0]); - break; - } - - case InlineAsm::isClobber: { - - unsigned NumRegs = OpInfo.Regs.size(); - if (NumRegs > 0) { - unsigned Flag = - InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs); - Inst.addImm(Flag); - - for (Register Reg : OpInfo.Regs) { - Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber | - getImplRegState(Reg.isPhysical())); - } - } - break; - } - } - } - - if (const MDNode *SrcLoc = Call.getMetadata("srcloc")) - Inst.addMetadata(SrcLoc); - - // All inputs are handled, insert the instruction now - MIRBuilder.insertInstr(Inst); - - // Finally, copy the output operands into the output registers - ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call); - if (ResRegs.size() != OutputOperands.size()) { - LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the " - "number of destination registers\n"); - return false; - } - for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) { - GISelAsmOperandInfo &OpInfo = OutputOperands[i]; - - if (OpInfo.Regs.empty()) - continue; - - switch (OpInfo.ConstraintType) { - case TargetLowering::C_Register: - case TargetLowering::C_RegisterClass: { - if (OpInfo.Regs.size() > 1) { - LLVM_DEBUG(dbgs() << "Output operands with multiple defining " - "registers are not supported yet\n"); - return false; - } - - Register SrcReg = OpInfo.Regs[0]; - unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI); - if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) { - // First copy the non-typed virtual register into a generic virtual - // register - Register Tmp1Reg = - MRI->createGenericVirtualRegister(LLT::scalar(SrcSize)); - MIRBuilder.buildCopy(Tmp1Reg, SrcReg); - // Need to truncate the result of the register - MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg); - } else { - MIRBuilder.buildCopy(ResRegs[i], SrcReg); - } - break; - } - case TargetLowering::C_Immediate: - case TargetLowering::C_Other: - LLVM_DEBUG( - dbgs() << "Cannot lower target specific output constraints yet\n"); - return false; - case TargetLowering::C_Memory: - break; // Already handled. 
- case TargetLowering::C_Unknown: - LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n"); - return false; - } - } - - return true; -} - -bool InlineAsmLowering::lowerAsmOperandForConstraint( - Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops, - MachineIRBuilder &MIRBuilder) const { - if (Constraint.size() > 1) - return false; - - char ConstraintLetter = Constraint[0]; - switch (ConstraintLetter) { - default: - return false; - case 'i': // Simple Integer or Relocatable Constant + Inst.addImm(Flag); + if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder)) + return false; + Inst.addReg(OpInfo.Regs[0]); + break; + } + + case InlineAsm::isClobber: { + + unsigned NumRegs = OpInfo.Regs.size(); + if (NumRegs > 0) { + unsigned Flag = + InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs); + Inst.addImm(Flag); + + for (Register Reg : OpInfo.Regs) { + Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber | + getImplRegState(Reg.isPhysical())); + } + } + break; + } + } + } + + if (const MDNode *SrcLoc = Call.getMetadata("srcloc")) + Inst.addMetadata(SrcLoc); + + // All inputs are handled, insert the instruction now + MIRBuilder.insertInstr(Inst); + + // Finally, copy the output operands into the output registers + ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call); + if (ResRegs.size() != OutputOperands.size()) { + LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the " + "number of destination registers\n"); + return false; + } + for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) { + GISelAsmOperandInfo &OpInfo = OutputOperands[i]; + + if (OpInfo.Regs.empty()) + continue; + + switch (OpInfo.ConstraintType) { + case TargetLowering::C_Register: + case TargetLowering::C_RegisterClass: { + if (OpInfo.Regs.size() > 1) { + LLVM_DEBUG(dbgs() << "Output operands with multiple defining " + "registers are not supported yet\n"); + return false; + } + + Register SrcReg = OpInfo.Regs[0]; + unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI); + if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) { + // First copy the non-typed virtual register into a generic virtual + // register + Register Tmp1Reg = + MRI->createGenericVirtualRegister(LLT::scalar(SrcSize)); + MIRBuilder.buildCopy(Tmp1Reg, SrcReg); + // Need to truncate the result of the register + MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg); + } else { + MIRBuilder.buildCopy(ResRegs[i], SrcReg); + } + break; + } + case TargetLowering::C_Immediate: + case TargetLowering::C_Other: + LLVM_DEBUG( + dbgs() << "Cannot lower target specific output constraints yet\n"); + return false; + case TargetLowering::C_Memory: + break; // Already handled. + case TargetLowering::C_Unknown: + LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n"); + return false; + } + } + + return true; +} + +bool InlineAsmLowering::lowerAsmOperandForConstraint( + Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops, + MachineIRBuilder &MIRBuilder) const { + if (Constraint.size() > 1) + return false; + + char ConstraintLetter = Constraint[0]; + switch (ConstraintLetter) { + default: + return false; + case 'i': // Simple Integer or Relocatable Constant case 'n': // immediate integer with a known value. - if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { - assert(CI->getBitWidth() <= 64 && - "expected immediate to fit into 64-bits"); - // Boolean constants should be zero-extended, others are sign-extended - bool IsBool = CI->getBitWidth() == 1; - int64_t ExtVal = IsBool ? 
CI->getZExtValue() : CI->getSExtValue(); - Ops.push_back(MachineOperand::CreateImm(ExtVal)); - return true; - } - return false; - } -} + if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { + assert(CI->getBitWidth() <= 64 && + "expected immediate to fit into 64-bits"); + // Boolean constants should be zero-extended, others are sign-extended + bool IsBool = CI->getBitWidth() == 1; + int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue(); + Ops.push_back(MachineOperand::CreateImm(ExtVal)); + return true; + } + return false; + } +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelect.cpp index 9c6ae00de3..25fae54871 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelect.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelect.cpp @@ -1,261 +1,261 @@ -//===- llvm/CodeGen/GlobalISel/InstructionSelect.cpp - InstructionSelect ---==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the InstructionSelect class. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/InstructionSelect.h" -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/ADT/Twine.h" -#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" -#include "llvm/CodeGen/GlobalISel/InstructionSelector.h" -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetInstrInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/Config/config.h" -#include "llvm/IR/Constants.h" -#include "llvm/IR/Function.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Target/TargetMachine.h" - -#define DEBUG_TYPE "instruction-select" - -using namespace llvm; - -#ifdef LLVM_GISEL_COV_PREFIX -static cl::opt<std::string> - CoveragePrefix("gisel-coverage-prefix", cl::init(LLVM_GISEL_COV_PREFIX), - cl::desc("Record GlobalISel rule coverage files of this " - "prefix if instrumentation was generated")); -#else +//===- llvm/CodeGen/GlobalISel/InstructionSelect.cpp - InstructionSelect ---==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the InstructionSelect class. 
+//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/InstructionSelect.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/Twine.h" +#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/InstructionSelector.h" +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Config/config.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Target/TargetMachine.h" + +#define DEBUG_TYPE "instruction-select" + +using namespace llvm; + +#ifdef LLVM_GISEL_COV_PREFIX +static cl::opt<std::string> + CoveragePrefix("gisel-coverage-prefix", cl::init(LLVM_GISEL_COV_PREFIX), + cl::desc("Record GlobalISel rule coverage files of this " + "prefix if instrumentation was generated")); +#else static const std::string CoveragePrefix; -#endif - -char InstructionSelect::ID = 0; -INITIALIZE_PASS_BEGIN(InstructionSelect, DEBUG_TYPE, - "Select target instructions out of generic instructions", - false, false) -INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) -INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis) -INITIALIZE_PASS_END(InstructionSelect, DEBUG_TYPE, - "Select target instructions out of generic instructions", - false, false) - -InstructionSelect::InstructionSelect() : MachineFunctionPass(ID) { } - -void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<TargetPassConfig>(); - AU.addRequired<GISelKnownBitsAnalysis>(); - AU.addPreserved<GISelKnownBitsAnalysis>(); - getSelectionDAGFallbackAnalysisUsage(AU); - MachineFunctionPass::getAnalysisUsage(AU); -} - -bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) { - // If the ISel pipeline failed, do not bother running that pass. - if (MF.getProperties().hasProperty( - MachineFunctionProperties::Property::FailedISel)) - return false; - - LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n'); - GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF); - - const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>(); - InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector(); - CodeGenCoverage CoverageInfo; - assert(ISel && "Cannot work without InstructionSelector"); - ISel->setupMF(MF, KB, CoverageInfo); - - // An optimization remark emitter. Used to report failures. - MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); - - // FIXME: There are many other MF/MFI fields we need to initialize. - - MachineRegisterInfo &MRI = MF.getRegInfo(); -#ifndef NDEBUG - // Check that our input is fully legal: we require the function to have the - // Legalized property, so it should be. - // FIXME: This should be in the MachineVerifier, as the RegBankSelected - // property check already is. - if (!DisableGISelLegalityCheck) - if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) { - reportGISelFailure(MF, TPC, MORE, "gisel-select", - "instruction is not legal", *MI); - return false; - } - // FIXME: We could introduce new blocks and will need to fix the outer loop. 
- // Until then, keep track of the number of blocks to assert that we don't. - const size_t NumBlocks = MF.size(); -#endif - - for (MachineBasicBlock *MBB : post_order(&MF)) { - if (MBB->empty()) - continue; - - // Select instructions in reverse block order. We permit erasing so have - // to resort to manually iterating and recognizing the begin (rend) case. - bool ReachedBegin = false; - for (auto MII = std::prev(MBB->end()), Begin = MBB->begin(); - !ReachedBegin;) { -#ifndef NDEBUG - // Keep track of the insertion range for debug printing. - const auto AfterIt = std::next(MII); -#endif - // Select this instruction. - MachineInstr &MI = *MII; - - // And have our iterator point to the next instruction, if there is one. - if (MII == Begin) - ReachedBegin = true; - else - --MII; - - LLVM_DEBUG(dbgs() << "Selecting: \n " << MI); - - // We could have folded this instruction away already, making it dead. - // If so, erase it. - if (isTriviallyDead(MI, MRI)) { - LLVM_DEBUG(dbgs() << "Is dead; erasing.\n"); - MI.eraseFromParentAndMarkDBGValuesForRemoval(); - continue; - } - - if (!ISel->select(MI)) { - // FIXME: It would be nice to dump all inserted instructions. It's - // not obvious how, esp. considering select() can insert after MI. - reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI); - return false; - } - - // Dump the range of instructions that MI expanded into. - LLVM_DEBUG({ - auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII); - dbgs() << "Into:\n"; - for (auto &InsertedMI : make_range(InsertedBegin, AfterIt)) - dbgs() << " " << InsertedMI; - dbgs() << '\n'; - }); - } - } - - for (MachineBasicBlock &MBB : MF) { - if (MBB.empty()) - continue; - - // Try to find redundant copies b/w vregs of the same register class. - bool ReachedBegin = false; - for (auto MII = std::prev(MBB.end()), Begin = MBB.begin(); !ReachedBegin;) { - // Select this instruction. - MachineInstr &MI = *MII; - - // And have our iterator point to the next instruction, if there is one. - if (MII == Begin) - ReachedBegin = true; - else - --MII; - if (MI.getOpcode() != TargetOpcode::COPY) - continue; - Register SrcReg = MI.getOperand(1).getReg(); - Register DstReg = MI.getOperand(0).getReg(); - if (Register::isVirtualRegister(SrcReg) && - Register::isVirtualRegister(DstReg)) { - auto SrcRC = MRI.getRegClass(SrcReg); - auto DstRC = MRI.getRegClass(DstReg); - if (SrcRC == DstRC) { - MRI.replaceRegWith(DstReg, SrcReg); - MI.eraseFromParent(); - } - } - } - } - -#ifndef NDEBUG - const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); - // Now that selection is complete, there are no more generic vregs. Verify - // that the size of the now-constrained vreg is unchanged and that it has a - // register class. 
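// As an illustration (register class and sizes are made up for the
// example), the loop below would reject a function containing
//
//   %5:gpr32 = ...            ; register class is 32 bits wide
//   ; MRI.getType(%5) == s64  ; but the vreg's LLT is 64 bits
//
// because the low-level type no longer fits in the register class chosen
// during selection, which would otherwise miscompile silently.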
- for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { - unsigned VReg = Register::index2VirtReg(I); - - MachineInstr *MI = nullptr; - if (!MRI.def_empty(VReg)) - MI = &*MRI.def_instr_begin(VReg); - else if (!MRI.use_empty(VReg)) - MI = &*MRI.use_instr_begin(VReg); - if (!MI) - continue; - - const TargetRegisterClass *RC = MRI.getRegClassOrNull(VReg); - if (!RC) { - reportGISelFailure(MF, TPC, MORE, "gisel-select", - "VReg has no regclass after selection", *MI); - return false; - } - - const LLT Ty = MRI.getType(VReg); - if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) { - reportGISelFailure( - MF, TPC, MORE, "gisel-select", - "VReg's low-level type and register class have different sizes", *MI); - return false; - } - } - - if (MF.size() != NumBlocks) { - MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure", - MF.getFunction().getSubprogram(), - /*MBB=*/nullptr); - R << "inserting blocks is not supported yet"; - reportGISelFailure(MF, TPC, MORE, R); - return false; - } -#endif - // Determine if there are any calls in this machine function. Ported from - // SelectionDAG. - MachineFrameInfo &MFI = MF.getFrameInfo(); - for (const auto &MBB : MF) { - if (MFI.hasCalls() && MF.hasInlineAsm()) - break; - - for (const auto &MI : MBB) { - if ((MI.isCall() && !MI.isReturn()) || MI.isStackAligningInlineAsm()) - MFI.setHasCalls(true); - if (MI.isInlineAsm()) - MF.setHasInlineAsm(true); - } - } - - // FIXME: FinalizeISel pass calls finalizeLowering, so it's called twice. - auto &TLI = *MF.getSubtarget().getTargetLowering(); - TLI.finalizeLowering(MF); - - LLVM_DEBUG({ - dbgs() << "Rules covered by selecting function: " << MF.getName() << ":"; - for (auto RuleID : CoverageInfo.covered()) - dbgs() << " id" << RuleID; - dbgs() << "\n\n"; - }); - CoverageInfo.emit(CoveragePrefix, - TLI.getTargetMachine().getTarget().getBackendName()); - - // If we successfully selected the function nothing is going to use the vreg - // types after us (otherwise MIRPrinter would need them). Make sure the types - // disappear. - MRI.clearVirtRegTypes(); - - // FIXME: Should we accurately track changes? - return true; -} +#endif + +char InstructionSelect::ID = 0; +INITIALIZE_PASS_BEGIN(InstructionSelect, DEBUG_TYPE, + "Select target instructions out of generic instructions", + false, false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis) +INITIALIZE_PASS_END(InstructionSelect, DEBUG_TYPE, + "Select target instructions out of generic instructions", + false, false) + +InstructionSelect::InstructionSelect() : MachineFunctionPass(ID) { } + +void InstructionSelect::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<TargetPassConfig>(); + AU.addRequired<GISelKnownBitsAnalysis>(); + AU.addPreserved<GISelKnownBitsAnalysis>(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) { + // If the ISel pipeline failed, do not bother running that pass. 
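// The FailedISel property tested below is set by reportGISelFailure when
// aborting is disabled (a simplified sketch of that path, not the full
// implementation):
//
//   MF.getProperties().set(
//       MachineFunctionProperties::Property::FailedISel);
//   MORE.emit(R);  // remark only; the SelectionDAG fallback runs later
//
// so each GlobalISel pass can cheaply skip functions that already failed.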
+ if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + + LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n'); + GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF); + + const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>(); + InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector(); + CodeGenCoverage CoverageInfo; + assert(ISel && "Cannot work without InstructionSelector"); + ISel->setupMF(MF, KB, CoverageInfo); + + // An optimization remark emitter. Used to report failures. + MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); + + // FIXME: There are many other MF/MFI fields we need to initialize. + + MachineRegisterInfo &MRI = MF.getRegInfo(); +#ifndef NDEBUG + // Check that our input is fully legal: we require the function to have the + // Legalized property, so it should be. + // FIXME: This should be in the MachineVerifier, as the RegBankSelected + // property check already is. + if (!DisableGISelLegalityCheck) + if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) { + reportGISelFailure(MF, TPC, MORE, "gisel-select", + "instruction is not legal", *MI); + return false; + } + // FIXME: We could introduce new blocks and will need to fix the outer loop. + // Until then, keep track of the number of blocks to assert that we don't. + const size_t NumBlocks = MF.size(); +#endif + + for (MachineBasicBlock *MBB : post_order(&MF)) { + if (MBB->empty()) + continue; + + // Select instructions in reverse block order. We permit erasing so have + // to resort to manually iterating and recognizing the begin (rend) case. + bool ReachedBegin = false; + for (auto MII = std::prev(MBB->end()), Begin = MBB->begin(); + !ReachedBegin;) { +#ifndef NDEBUG + // Keep track of the insertion range for debug printing. + const auto AfterIt = std::next(MII); +#endif + // Select this instruction. + MachineInstr &MI = *MII; + + // And have our iterator point to the next instruction, if there is one. + if (MII == Begin) + ReachedBegin = true; + else + --MII; + + LLVM_DEBUG(dbgs() << "Selecting: \n " << MI); + + // We could have folded this instruction away already, making it dead. + // If so, erase it. + if (isTriviallyDead(MI, MRI)) { + LLVM_DEBUG(dbgs() << "Is dead; erasing.\n"); + MI.eraseFromParentAndMarkDBGValuesForRemoval(); + continue; + } + + if (!ISel->select(MI)) { + // FIXME: It would be nice to dump all inserted instructions. It's + // not obvious how, esp. considering select() can insert after MI. + reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI); + return false; + } + + // Dump the range of instructions that MI expanded into. + LLVM_DEBUG({ + auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII); + dbgs() << "Into:\n"; + for (auto &InsertedMI : make_range(InsertedBegin, AfterIt)) + dbgs() << " " << InsertedMI; + dbgs() << '\n'; + }); + } + } + + for (MachineBasicBlock &MBB : MF) { + if (MBB.empty()) + continue; + + // Try to find redundant copies b/w vregs of the same register class. + bool ReachedBegin = false; + for (auto MII = std::prev(MBB.end()), Begin = MBB.begin(); !ReachedBegin;) { + // Select this instruction. + MachineInstr &MI = *MII; + + // And have our iterator point to the next instruction, if there is one. 
+ if (MII == Begin) + ReachedBegin = true; + else + --MII; + if (MI.getOpcode() != TargetOpcode::COPY) + continue; + Register SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + if (Register::isVirtualRegister(SrcReg) && + Register::isVirtualRegister(DstReg)) { + auto SrcRC = MRI.getRegClass(SrcReg); + auto DstRC = MRI.getRegClass(DstReg); + if (SrcRC == DstRC) { + MRI.replaceRegWith(DstReg, SrcReg); + MI.eraseFromParent(); + } + } + } + } + +#ifndef NDEBUG + const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); + // Now that selection is complete, there are no more generic vregs. Verify + // that the size of the now-constrained vreg is unchanged and that it has a + // register class. + for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { + unsigned VReg = Register::index2VirtReg(I); + + MachineInstr *MI = nullptr; + if (!MRI.def_empty(VReg)) + MI = &*MRI.def_instr_begin(VReg); + else if (!MRI.use_empty(VReg)) + MI = &*MRI.use_instr_begin(VReg); + if (!MI) + continue; + + const TargetRegisterClass *RC = MRI.getRegClassOrNull(VReg); + if (!RC) { + reportGISelFailure(MF, TPC, MORE, "gisel-select", + "VReg has no regclass after selection", *MI); + return false; + } + + const LLT Ty = MRI.getType(VReg); + if (Ty.isValid() && Ty.getSizeInBits() > TRI.getRegSizeInBits(*RC)) { + reportGISelFailure( + MF, TPC, MORE, "gisel-select", + "VReg's low-level type and register class have different sizes", *MI); + return false; + } + } + + if (MF.size() != NumBlocks) { + MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure", + MF.getFunction().getSubprogram(), + /*MBB=*/nullptr); + R << "inserting blocks is not supported yet"; + reportGISelFailure(MF, TPC, MORE, R); + return false; + } +#endif + // Determine if there are any calls in this machine function. Ported from + // SelectionDAG. + MachineFrameInfo &MFI = MF.getFrameInfo(); + for (const auto &MBB : MF) { + if (MFI.hasCalls() && MF.hasInlineAsm()) + break; + + for (const auto &MI : MBB) { + if ((MI.isCall() && !MI.isReturn()) || MI.isStackAligningInlineAsm()) + MFI.setHasCalls(true); + if (MI.isInlineAsm()) + MF.setHasInlineAsm(true); + } + } + + // FIXME: FinalizeISel pass calls finalizeLowering, so it's called twice. + auto &TLI = *MF.getSubtarget().getTargetLowering(); + TLI.finalizeLowering(MF); + + LLVM_DEBUG({ + dbgs() << "Rules covered by selecting function: " << MF.getName() << ":"; + for (auto RuleID : CoverageInfo.covered()) + dbgs() << " id" << RuleID; + dbgs() << "\n\n"; + }); + CoverageInfo.emit(CoveragePrefix, + TLI.getTargetMachine().getTarget().getBackendName()); + + // If we successfully selected the function nothing is going to use the vreg + // types after us (otherwise MIRPrinter would need them). Make sure the types + // disappear. + MRI.clearVirtRegTypes(); + + // FIXME: Should we accurately track changes? + return true; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelector.cpp index 8818439a30..4fec9e628d 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelector.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/InstructionSelector.cpp @@ -1,71 +1,71 @@ -//===- llvm/CodeGen/GlobalISel/InstructionSelector.cpp --------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -/// \file -/// This file implements the InstructionSelector class. -// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/InstructionSelector.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetRegisterInfo.h" -#include "llvm/MC/MCInstrDesc.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" -#include <cassert> - -#define DEBUG_TYPE "instructionselector" - -using namespace llvm; - -InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers) - : Renderers(MaxRenderers), MIs() {} - -InstructionSelector::InstructionSelector() = default; - -bool InstructionSelector::isOperandImmEqual( - const MachineOperand &MO, int64_t Value, - const MachineRegisterInfo &MRI) const { - if (MO.isReg() && MO.getReg()) - if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI)) +//===- llvm/CodeGen/GlobalISel/InstructionSelector.cpp --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This file implements the InstructionSelector class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/InstructionSelector.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> + +#define DEBUG_TYPE "instructionselector" + +using namespace llvm; + +InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers) + : Renderers(MaxRenderers), MIs() {} + +InstructionSelector::InstructionSelector() = default; + +bool InstructionSelector::isOperandImmEqual( + const MachineOperand &MO, int64_t Value, + const MachineRegisterInfo &MRI) const { + if (MO.isReg() && MO.getReg()) + if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI)) return VRegVal->Value.getSExtValue() == Value; - return false; -} - -bool InstructionSelector::isBaseWithConstantOffset( - const MachineOperand &Root, const MachineRegisterInfo &MRI) const { - if (!Root.isReg()) - return false; - - MachineInstr *RootI = MRI.getVRegDef(Root.getReg()); - if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) - return false; - - MachineOperand &RHS = RootI->getOperand(2); - MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg()); - if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT) - return false; - - return true; -} - -bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI, - MachineInstr &IntoMI) const { - // Immediate neighbours are already folded. 
- if (MI.getParent() == IntoMI.getParent() && - std::next(MI.getIterator()) == IntoMI.getIterator()) - return true; - - return !MI.mayLoadOrStore() && !MI.mayRaiseFPException() && - !MI.hasUnmodeledSideEffects() && MI.implicit_operands().empty(); -} + return false; +} + +bool InstructionSelector::isBaseWithConstantOffset( + const MachineOperand &Root, const MachineRegisterInfo &MRI) const { + if (!Root.isReg()) + return false; + + MachineInstr *RootI = MRI.getVRegDef(Root.getReg()); + if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) + return false; + + MachineOperand &RHS = RootI->getOperand(2); + MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg()); + if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT) + return false; + + return true; +} + +bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI, + MachineInstr &IntoMI) const { + // Immediate neighbours are already folded. + if (MI.getParent() == IntoMI.getParent() && + std::next(MI.getIterator()) == IntoMI.getIterator()) + return true; + + return !MI.mayLoadOrStore() && !MI.mayRaiseFPException() && + !MI.hasUnmodeledSideEffects() && MI.implicit_operands().empty(); +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalityPredicates.cpp index 1a73ea26bc..1993f60332 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalityPredicates.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalityPredicates.cpp @@ -1,15 +1,15 @@ -//===- lib/CodeGen/GlobalISel/LegalizerPredicates.cpp - Predicates --------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// A library of predicate factories to use for LegalityPredicate. -// -//===----------------------------------------------------------------------===// - +//===- lib/CodeGen/GlobalISel/LegalizerPredicates.cpp - Predicates --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A library of predicate factories to use for LegalityPredicate. +// +//===----------------------------------------------------------------------===// + // Enable optimizations to work around MSVC debug mode bug in 32-bit: // https://developercommunity.visualstudio.com/content/problem/1179643/msvc-copies-overaligned-non-trivially-copyable-par.html // FIXME: Remove this when the issue is closed. 
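The predicate factories this file provides are meant to be composed inside a target's LegalizerInfo rules; a minimal usage sketch, with a hypothetical rule (opcode and bit widths chosen purely for illustration):

  using namespace LegalityPredicates;
  getActionDefinitionsBuilder(TargetOpcode::G_ADD)
      .legalIf(typeInSet(0, {LLT::scalar(32), LLT::scalar(64)}))
      .widenScalarIf(scalarNarrowerThan(0, 32),
                     LegalizeMutations::changeTo(0, LLT::scalar(32)));
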
@@ -21,175 +21,175 @@ #pragma optimize("gs", on) #endif -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" - -using namespace llvm; - -LegalityPredicate LegalityPredicates::typeIs(unsigned TypeIdx, LLT Type) { - return - [=](const LegalityQuery &Query) { return Query.Types[TypeIdx] == Type; }; -} - -LegalityPredicate -LegalityPredicates::typeInSet(unsigned TypeIdx, - std::initializer_list<LLT> TypesInit) { - SmallVector<LLT, 4> Types = TypesInit; - return [=](const LegalityQuery &Query) { +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" + +using namespace llvm; + +LegalityPredicate LegalityPredicates::typeIs(unsigned TypeIdx, LLT Type) { + return + [=](const LegalityQuery &Query) { return Query.Types[TypeIdx] == Type; }; +} + +LegalityPredicate +LegalityPredicates::typeInSet(unsigned TypeIdx, + std::initializer_list<LLT> TypesInit) { + SmallVector<LLT, 4> Types = TypesInit; + return [=](const LegalityQuery &Query) { return llvm::is_contained(Types, Query.Types[TypeIdx]); - }; -} - -LegalityPredicate LegalityPredicates::typePairInSet( - unsigned TypeIdx0, unsigned TypeIdx1, - std::initializer_list<std::pair<LLT, LLT>> TypesInit) { - SmallVector<std::pair<LLT, LLT>, 4> Types = TypesInit; - return [=](const LegalityQuery &Query) { - std::pair<LLT, LLT> Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1]}; + }; +} + +LegalityPredicate LegalityPredicates::typePairInSet( + unsigned TypeIdx0, unsigned TypeIdx1, + std::initializer_list<std::pair<LLT, LLT>> TypesInit) { + SmallVector<std::pair<LLT, LLT>, 4> Types = TypesInit; + return [=](const LegalityQuery &Query) { + std::pair<LLT, LLT> Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1]}; return llvm::is_contained(Types, Match); - }; -} - -LegalityPredicate LegalityPredicates::typePairAndMemDescInSet( - unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx, - std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit) { - SmallVector<TypePairAndMemDesc, 4> TypesAndMemDesc = TypesAndMemDescInit; - return [=](const LegalityQuery &Query) { - TypePairAndMemDesc Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1], - Query.MMODescrs[MMOIdx].SizeInBits, - Query.MMODescrs[MMOIdx].AlignInBits}; + }; +} + +LegalityPredicate LegalityPredicates::typePairAndMemDescInSet( + unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx, + std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit) { + SmallVector<TypePairAndMemDesc, 4> TypesAndMemDesc = TypesAndMemDescInit; + return [=](const LegalityQuery &Query) { + TypePairAndMemDesc Match = {Query.Types[TypeIdx0], Query.Types[TypeIdx1], + Query.MMODescrs[MMOIdx].SizeInBits, + Query.MMODescrs[MMOIdx].AlignInBits}; return llvm::any_of(TypesAndMemDesc, [=](const TypePairAndMemDesc &Entry) -> bool { return Match.isCompatible(Entry); }); - }; -} - -LegalityPredicate LegalityPredicates::isScalar(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx].isScalar(); - }; -} - -LegalityPredicate LegalityPredicates::isVector(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx].isVector(); - }; -} - -LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx].isPointer(); - }; -} - -LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx, - unsigned AddrSpace) { - return [=](const LegalityQuery &Query) { - LLT Ty = Query.Types[TypeIdx]; - return Ty.isPointer() && Ty.getAddressSpace() == AddrSpace; - }; -} - -LegalityPredicate 
LegalityPredicates::elementTypeIs(unsigned TypeIdx, - LLT EltTy) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.isVector() && QueryTy.getElementType() == EltTy; - }; -} - -LegalityPredicate LegalityPredicates::scalarNarrowerThan(unsigned TypeIdx, - unsigned Size) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.isScalar() && QueryTy.getSizeInBits() < Size; - }; -} - -LegalityPredicate LegalityPredicates::scalarWiderThan(unsigned TypeIdx, - unsigned Size) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.isScalar() && QueryTy.getSizeInBits() > Size; - }; -} - -LegalityPredicate LegalityPredicates::smallerThan(unsigned TypeIdx0, - unsigned TypeIdx1) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx0].getSizeInBits() < - Query.Types[TypeIdx1].getSizeInBits(); - }; -} - -LegalityPredicate LegalityPredicates::largerThan(unsigned TypeIdx0, - unsigned TypeIdx1) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx0].getSizeInBits() > - Query.Types[TypeIdx1].getSizeInBits(); - }; -} - -LegalityPredicate LegalityPredicates::scalarOrEltNarrowerThan(unsigned TypeIdx, - unsigned Size) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.getScalarSizeInBits() < Size; - }; -} - -LegalityPredicate LegalityPredicates::scalarOrEltWiderThan(unsigned TypeIdx, - unsigned Size) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.getScalarSizeInBits() > Size; - }; -} - -LegalityPredicate LegalityPredicates::scalarOrEltSizeNotPow2(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return !isPowerOf2_32(QueryTy.getScalarSizeInBits()); - }; -} - -LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.isScalar() && !isPowerOf2_32(QueryTy.getSizeInBits()); - }; -} - -LegalityPredicate LegalityPredicates::sizeIs(unsigned TypeIdx, unsigned Size) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx].getSizeInBits() == Size; - }; -} - -LegalityPredicate LegalityPredicates::sameSize(unsigned TypeIdx0, - unsigned TypeIdx1) { - return [=](const LegalityQuery &Query) { - return Query.Types[TypeIdx0].getSizeInBits() == - Query.Types[TypeIdx1].getSizeInBits(); - }; -} - -LegalityPredicate LegalityPredicates::memSizeInBytesNotPow2(unsigned MMOIdx) { - return [=](const LegalityQuery &Query) { - return !isPowerOf2_32(Query.MMODescrs[MMOIdx].SizeInBits / 8); - }; -} - -LegalityPredicate LegalityPredicates::numElementsNotPow2(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - const LLT QueryTy = Query.Types[TypeIdx]; - return QueryTy.isVector() && !isPowerOf2_32(QueryTy.getNumElements()); - }; -} - -LegalityPredicate LegalityPredicates::atomicOrderingAtLeastOrStrongerThan( - unsigned MMOIdx, AtomicOrdering Ordering) { - return [=](const LegalityQuery &Query) { - return isAtLeastOrStrongerThan(Query.MMODescrs[MMOIdx].Ordering, Ordering); - }; -} + }; +} + +LegalityPredicate LegalityPredicates::isScalar(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].isScalar(); + }; +} + +LegalityPredicate LegalityPredicates::isVector(unsigned TypeIdx) { + return [=](const 
LegalityQuery &Query) { + return Query.Types[TypeIdx].isVector(); + }; +} + +LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].isPointer(); + }; +} + +LegalityPredicate LegalityPredicates::isPointer(unsigned TypeIdx, + unsigned AddrSpace) { + return [=](const LegalityQuery &Query) { + LLT Ty = Query.Types[TypeIdx]; + return Ty.isPointer() && Ty.getAddressSpace() == AddrSpace; + }; +} + +LegalityPredicate LegalityPredicates::elementTypeIs(unsigned TypeIdx, + LLT EltTy) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isVector() && QueryTy.getElementType() == EltTy; + }; +} + +LegalityPredicate LegalityPredicates::scalarNarrowerThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && QueryTy.getSizeInBits() < Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarWiderThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && QueryTy.getSizeInBits() > Size; + }; +} + +LegalityPredicate LegalityPredicates::smallerThan(unsigned TypeIdx0, + unsigned TypeIdx1) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx0].getSizeInBits() < + Query.Types[TypeIdx1].getSizeInBits(); + }; +} + +LegalityPredicate LegalityPredicates::largerThan(unsigned TypeIdx0, + unsigned TypeIdx1) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx0].getSizeInBits() > + Query.Types[TypeIdx1].getSizeInBits(); + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltNarrowerThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.getScalarSizeInBits() < Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltWiderThan(unsigned TypeIdx, + unsigned Size) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.getScalarSizeInBits() > Size; + }; +} + +LegalityPredicate LegalityPredicates::scalarOrEltSizeNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return !isPowerOf2_32(QueryTy.getScalarSizeInBits()); + }; +} + +LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isScalar() && !isPowerOf2_32(QueryTy.getSizeInBits()); + }; +} + +LegalityPredicate LegalityPredicates::sizeIs(unsigned TypeIdx, unsigned Size) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx].getSizeInBits() == Size; + }; +} + +LegalityPredicate LegalityPredicates::sameSize(unsigned TypeIdx0, + unsigned TypeIdx1) { + return [=](const LegalityQuery &Query) { + return Query.Types[TypeIdx0].getSizeInBits() == + Query.Types[TypeIdx1].getSizeInBits(); + }; +} + +LegalityPredicate LegalityPredicates::memSizeInBytesNotPow2(unsigned MMOIdx) { + return [=](const LegalityQuery &Query) { + return !isPowerOf2_32(Query.MMODescrs[MMOIdx].SizeInBits / 8); + }; +} + +LegalityPredicate LegalityPredicates::numElementsNotPow2(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT QueryTy = Query.Types[TypeIdx]; + return QueryTy.isVector() && !isPowerOf2_32(QueryTy.getNumElements()); + }; +} + +LegalityPredicate 
LegalityPredicates::atomicOrderingAtLeastOrStrongerThan( + unsigned MMOIdx, AtomicOrdering Ordering) { + return [=](const LegalityQuery &Query) { + return isAtLeastOrStrongerThan(Query.MMODescrs[MMOIdx].Ordering, Ordering); + }; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizeMutations.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizeMutations.cpp index a256d08892..f3ba3f0801 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizeMutations.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizeMutations.cpp @@ -1,48 +1,48 @@ -//===- lib/CodeGen/GlobalISel/LegalizerMutations.cpp - Mutations ----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// A library of mutation factories to use for LegalityMutation. -// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" - -using namespace llvm; - -LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, LLT Ty) { - return - [=](const LegalityQuery &Query) { return std::make_pair(TypeIdx, Ty); }; -} - -LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, - unsigned FromTypeIdx) { - return [=](const LegalityQuery &Query) { - return std::make_pair(TypeIdx, Query.Types[FromTypeIdx]); - }; -} - -LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, - unsigned FromTypeIdx) { - return [=](const LegalityQuery &Query) { - const LLT OldTy = Query.Types[TypeIdx]; - const LLT NewTy = Query.Types[FromTypeIdx]; - return std::make_pair(TypeIdx, OldTy.changeElementType(NewTy)); - }; -} - -LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, - LLT NewEltTy) { - return [=](const LegalityQuery &Query) { - const LLT OldTy = Query.Types[TypeIdx]; - return std::make_pair(TypeIdx, OldTy.changeElementType(NewEltTy)); - }; -} - +//===- lib/CodeGen/GlobalISel/LegalizerMutations.cpp - Mutations ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// A library of mutation factories to use for LegalityMutation. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" + +using namespace llvm; + +LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, LLT Ty) { + return + [=](const LegalityQuery &Query) { return std::make_pair(TypeIdx, Ty); }; +} + +LegalizeMutation LegalizeMutations::changeTo(unsigned TypeIdx, + unsigned FromTypeIdx) { + return [=](const LegalityQuery &Query) { + return std::make_pair(TypeIdx, Query.Types[FromTypeIdx]); + }; +} + +LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, + unsigned FromTypeIdx) { + return [=](const LegalityQuery &Query) { + const LLT OldTy = Query.Types[TypeIdx]; + const LLT NewTy = Query.Types[FromTypeIdx]; + return std::make_pair(TypeIdx, OldTy.changeElementType(NewTy)); + }; +} + +LegalizeMutation LegalizeMutations::changeElementTo(unsigned TypeIdx, + LLT NewEltTy) { + return [=](const LegalityQuery &Query) { + const LLT OldTy = Query.Types[TypeIdx]; + return std::make_pair(TypeIdx, OldTy.changeElementType(NewEltTy)); + }; +} + LegalizeMutation LegalizeMutations::changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx) { return [=](const LegalityQuery &Query) { @@ -53,29 +53,29 @@ LegalizeMutation LegalizeMutations::changeElementSizeTo(unsigned TypeIdx, }; } -LegalizeMutation LegalizeMutations::widenScalarOrEltToNextPow2(unsigned TypeIdx, - unsigned Min) { - return [=](const LegalityQuery &Query) { - const LLT Ty = Query.Types[TypeIdx]; - unsigned NewEltSizeInBits = - std::max(1u << Log2_32_Ceil(Ty.getScalarSizeInBits()), Min); - return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits)); - }; -} - -LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx, - unsigned Min) { - return [=](const LegalityQuery &Query) { - const LLT VecTy = Query.Types[TypeIdx]; - unsigned NewNumElements = - std::max(1u << Log2_32_Ceil(VecTy.getNumElements()), Min); - return std::make_pair(TypeIdx, - LLT::vector(NewNumElements, VecTy.getElementType())); - }; -} - -LegalizeMutation LegalizeMutations::scalarize(unsigned TypeIdx) { - return [=](const LegalityQuery &Query) { - return std::make_pair(TypeIdx, Query.Types[TypeIdx].getElementType()); - }; -} +LegalizeMutation LegalizeMutations::widenScalarOrEltToNextPow2(unsigned TypeIdx, + unsigned Min) { + return [=](const LegalityQuery &Query) { + const LLT Ty = Query.Types[TypeIdx]; + unsigned NewEltSizeInBits = + std::max(1u << Log2_32_Ceil(Ty.getScalarSizeInBits()), Min); + return std::make_pair(TypeIdx, Ty.changeElementSize(NewEltSizeInBits)); + }; +} + +LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx, + unsigned Min) { + return [=](const LegalityQuery &Query) { + const LLT VecTy = Query.Types[TypeIdx]; + unsigned NewNumElements = + std::max(1u << Log2_32_Ceil(VecTy.getNumElements()), Min); + return std::make_pair(TypeIdx, + LLT::vector(NewNumElements, VecTy.getElementType())); + }; +} + +LegalizeMutation LegalizeMutations::scalarize(unsigned TypeIdx) { + return [=](const LegalityQuery &Query) { + return std::make_pair(TypeIdx, Query.Types[TypeIdx].getElementType()); + }; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Legalizer.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Legalizer.cpp index 490b7b2c0f..5ba9367cac 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Legalizer.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Legalizer.cpp @@ -1,394 +1,394 @@ -//===-- llvm/CodeGen/GlobalISel/Legalizer.cpp 
-----------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -/// \file This file implements the LegalizerHelper class to legalize individual -/// instructions and the LegalizePass wrapper pass for the primary -/// legalization. -// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/Legalizer.h" -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/ADT/SetVector.h" -#include "llvm/CodeGen/GlobalISel/CSEInfo.h" -#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/GlobalISel/GISelWorkList.h" -#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h" -#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" -#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/InitializePasses.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/Error.h" -#include "llvm/Target/TargetMachine.h" - -#include <iterator> - -#define DEBUG_TYPE "legalizer" - -using namespace llvm; - -static cl::opt<bool> - EnableCSEInLegalizer("enable-cse-in-legalizer", - cl::desc("Should enable CSE in Legalizer"), - cl::Optional, cl::init(false)); - -enum class DebugLocVerifyLevel { - None, - Legalizations, - LegalizationsAndArtifactCombiners, -}; -#ifndef NDEBUG -static cl::opt<DebugLocVerifyLevel> VerifyDebugLocs( - "verify-legalizer-debug-locs", - cl::desc("Verify that debug locations are handled"), - cl::values( - clEnumValN(DebugLocVerifyLevel::None, "none", "No verification"), - clEnumValN(DebugLocVerifyLevel::Legalizations, "legalizations", - "Verify legalizations"), - clEnumValN(DebugLocVerifyLevel::LegalizationsAndArtifactCombiners, - "legalizations+artifactcombiners", - "Verify legalizations and artifact combines")), - cl::init(DebugLocVerifyLevel::Legalizations)); -#else -// Always disable it for release builds by preventing the observer from being -// installed. 
-static const DebugLocVerifyLevel VerifyDebugLocs = DebugLocVerifyLevel::None; -#endif - -char Legalizer::ID = 0; -INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE, - "Legalize the Machine IR a function's Machine IR", false, - false) -INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) -INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) -INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE, - "Legalize the Machine IR a function's Machine IR", false, - false) - -Legalizer::Legalizer() : MachineFunctionPass(ID) { } - -void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<TargetPassConfig>(); - AU.addRequired<GISelCSEAnalysisWrapperPass>(); - AU.addPreserved<GISelCSEAnalysisWrapperPass>(); - getSelectionDAGFallbackAnalysisUsage(AU); - MachineFunctionPass::getAnalysisUsage(AU); -} - -void Legalizer::init(MachineFunction &MF) { -} - -static bool isArtifact(const MachineInstr &MI) { - switch (MI.getOpcode()) { - default: - return false; - case TargetOpcode::G_TRUNC: - case TargetOpcode::G_ZEXT: - case TargetOpcode::G_ANYEXT: - case TargetOpcode::G_SEXT: - case TargetOpcode::G_MERGE_VALUES: - case TargetOpcode::G_UNMERGE_VALUES: - case TargetOpcode::G_CONCAT_VECTORS: - case TargetOpcode::G_BUILD_VECTOR: - case TargetOpcode::G_EXTRACT: - return true; - } -} -using InstListTy = GISelWorkList<256>; -using ArtifactListTy = GISelWorkList<128>; - -namespace { -class LegalizerWorkListManager : public GISelChangeObserver { - InstListTy &InstList; - ArtifactListTy &ArtifactList; -#ifndef NDEBUG - SmallVector<MachineInstr *, 4> NewMIs; -#endif - -public: - LegalizerWorkListManager(InstListTy &Insts, ArtifactListTy &Arts) - : InstList(Insts), ArtifactList(Arts) {} - - void createdOrChangedInstr(MachineInstr &MI) { - // Only legalize pre-isel generic instructions. - // Legalization process could generate Target specific pseudo - // instructions with generic types. Don't record them - if (isPreISelGenericOpcode(MI.getOpcode())) { - if (isArtifact(MI)) - ArtifactList.insert(&MI); - else - InstList.insert(&MI); - } - } - - void createdInstr(MachineInstr &MI) override { - LLVM_DEBUG(NewMIs.push_back(&MI)); - createdOrChangedInstr(MI); - } - - void printNewInstrs() { - LLVM_DEBUG({ - for (const auto *MI : NewMIs) - dbgs() << ".. .. New MI: " << *MI; - NewMIs.clear(); - }); - } - - void erasingInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << ".. .. Erasing: " << MI); - InstList.remove(&MI); - ArtifactList.remove(&MI); - } - - void changingInstr(MachineInstr &MI) override { - LLVM_DEBUG(dbgs() << ".. .. Changing MI: " << MI); - } - - void changedInstr(MachineInstr &MI) override { - // When insts change, we want to revisit them to legalize them again. - // We'll consider them the same as created. - LLVM_DEBUG(dbgs() << ".. .. Changed MI: " << MI); - createdOrChangedInstr(MI); - } -}; -} // namespace - -Legalizer::MFResult -Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI, - ArrayRef<GISelChangeObserver *> AuxObservers, - LostDebugLocObserver &LocObserver, - MachineIRBuilder &MIRBuilder) { - MIRBuilder.setMF(MF); - MachineRegisterInfo &MRI = MF.getRegInfo(); - - // Populate worklists. - InstListTy InstList; - ArtifactListTy ArtifactList; - ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); - // Perform legalization bottom up so we can DCE as we legalize. - // Traverse BB in RPOT and within each basic block, add insts top down, - // so when we pop_back_val in the legalization process, we traverse bottom-up. 
- for (auto *MBB : RPOT) { - if (MBB->empty()) - continue; - for (MachineInstr &MI : *MBB) { - // Only legalize pre-isel generic instructions: others don't have types - // and are assumed to be legal. - if (!isPreISelGenericOpcode(MI.getOpcode())) - continue; - if (isArtifact(MI)) - ArtifactList.deferred_insert(&MI); - else - InstList.deferred_insert(&MI); - } - } - ArtifactList.finalize(); - InstList.finalize(); - - // This observer keeps the worklists updated. - LegalizerWorkListManager WorkListObserver(InstList, ArtifactList); - // We want both WorkListObserver as well as all the auxiliary observers (e.g. - // CSEInfo) to observe all changes. Use the wrapper observer. - GISelObserverWrapper WrapperObserver(&WorkListObserver); - for (GISelChangeObserver *Observer : AuxObservers) - WrapperObserver.addObserver(Observer); - - // Now install the observer as the delegate to MF. - // This will keep all the observers notified about new insertions/deletions. - RAIIMFObsDelInstaller Installer(MF, WrapperObserver); - LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder); - LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI); - auto RemoveDeadInstFromLists = [&WrapperObserver](MachineInstr *DeadMI) { - WrapperObserver.erasingInstr(*DeadMI); - }; - bool Changed = false; - SmallVector<MachineInstr *, 128> RetryList; - do { - LLVM_DEBUG(dbgs() << "=== New Iteration ===\n"); - assert(RetryList.empty() && "Expected no instructions in RetryList"); - unsigned NumArtifacts = ArtifactList.size(); - while (!InstList.empty()) { - MachineInstr &MI = *InstList.pop_back_val(); - assert(isPreISelGenericOpcode(MI.getOpcode()) && - "Expecting generic opcode"); - if (isTriviallyDead(MI, MRI)) { - LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n"); - MI.eraseFromParentAndMarkDBGValuesForRemoval(); - LocObserver.checkpoint(false); - continue; - } - - // Do the legalization for this instruction. - auto Res = Helper.legalizeInstrStep(MI); - // Error out if we couldn't legalize this instruction. We may want to - // fall back to DAG ISel instead in the future. - if (Res == LegalizerHelper::UnableToLegalize) { - // Move illegal artifacts to RetryList instead of aborting because - // legalizing InstList may generate artifacts that allow - // ArtifactCombiner to combine away them. - if (isArtifact(MI)) { - LLVM_DEBUG(dbgs() << ".. Not legalized, moving to artifacts retry\n"); - assert(NumArtifacts == 0 && - "Artifacts are only expected in instruction list starting the " - "second iteration, but each iteration starting second must " - "start with an empty artifacts list"); - (void)NumArtifacts; - RetryList.push_back(&MI); - continue; - } - Helper.MIRBuilder.stopObservingChanges(); - return {Changed, &MI}; - } - WorkListObserver.printNewInstrs(); - LocObserver.checkpoint(); - Changed |= Res == LegalizerHelper::Legalized; - } - // Try to combine the instructions in RetryList again if there - // are new artifacts. If not, stop legalizing. 
- if (!RetryList.empty()) { - if (!ArtifactList.empty()) { - while (!RetryList.empty()) - ArtifactList.insert(RetryList.pop_back_val()); - } else { - LLVM_DEBUG(dbgs() << "No new artifacts created, not retrying!\n"); - Helper.MIRBuilder.stopObservingChanges(); - return {Changed, RetryList.front()}; - } - } - LocObserver.checkpoint(); - while (!ArtifactList.empty()) { - MachineInstr &MI = *ArtifactList.pop_back_val(); - assert(isPreISelGenericOpcode(MI.getOpcode()) && - "Expecting generic opcode"); - if (isTriviallyDead(MI, MRI)) { - LLVM_DEBUG(dbgs() << MI << "Is dead\n"); - RemoveDeadInstFromLists(&MI); - MI.eraseFromParentAndMarkDBGValuesForRemoval(); - LocObserver.checkpoint(false); - continue; - } - SmallVector<MachineInstr *, 4> DeadInstructions; - LLVM_DEBUG(dbgs() << "Trying to combine: " << MI); - if (ArtCombiner.tryCombineInstruction(MI, DeadInstructions, - WrapperObserver)) { - WorkListObserver.printNewInstrs(); - for (auto *DeadMI : DeadInstructions) { +//===-- llvm/CodeGen/GlobalISel/Legalizer.cpp -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file This file implements the LegalizerHelper class to legalize individual +/// instructions and the LegalizePass wrapper pass for the primary +/// legalization. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/Legalizer.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/GISelWorkList.h" +#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h" +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" +#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" +#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/InitializePasses.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Error.h" +#include "llvm/Target/TargetMachine.h" + +#include <iterator> + +#define DEBUG_TYPE "legalizer" + +using namespace llvm; + +static cl::opt<bool> + EnableCSEInLegalizer("enable-cse-in-legalizer", + cl::desc("Should enable CSE in Legalizer"), + cl::Optional, cl::init(false)); + +enum class DebugLocVerifyLevel { + None, + Legalizations, + LegalizationsAndArtifactCombiners, +}; +#ifndef NDEBUG +static cl::opt<DebugLocVerifyLevel> VerifyDebugLocs( + "verify-legalizer-debug-locs", + cl::desc("Verify that debug locations are handled"), + cl::values( + clEnumValN(DebugLocVerifyLevel::None, "none", "No verification"), + clEnumValN(DebugLocVerifyLevel::Legalizations, "legalizations", + "Verify legalizations"), + clEnumValN(DebugLocVerifyLevel::LegalizationsAndArtifactCombiners, + "legalizations+artifactcombiners", + "Verify legalizations and artifact combines")), + cl::init(DebugLocVerifyLevel::Legalizations)); +#else +// Always disable it for release builds by preventing the observer from being +// installed. 
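This #else branch keeps release builds free of the verification entirely; the general debug-only-option pattern, sketched here with hypothetical names, is:

  #ifndef NDEBUG
  static cl::opt<bool> CheckFoo("check-foo", cl::init(true));
  #else
  static const bool CheckFoo = false; // constant-folds the guarded checks away
  #endif
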
+static const DebugLocVerifyLevel VerifyDebugLocs = DebugLocVerifyLevel::None; +#endif + +char Legalizer::ID = 0; +INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE, + "Legalize the Machine IR a function's Machine IR", false, + false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) +INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE, + "Legalize the Machine IR a function's Machine IR", false, + false) + +Legalizer::Legalizer() : MachineFunctionPass(ID) { } + +void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<TargetPassConfig>(); + AU.addRequired<GISelCSEAnalysisWrapperPass>(); + AU.addPreserved<GISelCSEAnalysisWrapperPass>(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +void Legalizer::init(MachineFunction &MF) { +} + +static bool isArtifact(const MachineInstr &MI) { + switch (MI.getOpcode()) { + default: + return false; + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_ANYEXT: + case TargetOpcode::G_SEXT: + case TargetOpcode::G_MERGE_VALUES: + case TargetOpcode::G_UNMERGE_VALUES: + case TargetOpcode::G_CONCAT_VECTORS: + case TargetOpcode::G_BUILD_VECTOR: + case TargetOpcode::G_EXTRACT: + return true; + } +} +using InstListTy = GISelWorkList<256>; +using ArtifactListTy = GISelWorkList<128>; + +namespace { +class LegalizerWorkListManager : public GISelChangeObserver { + InstListTy &InstList; + ArtifactListTy &ArtifactList; +#ifndef NDEBUG + SmallVector<MachineInstr *, 4> NewMIs; +#endif + +public: + LegalizerWorkListManager(InstListTy &Insts, ArtifactListTy &Arts) + : InstList(Insts), ArtifactList(Arts) {} + + void createdOrChangedInstr(MachineInstr &MI) { + // Only legalize pre-isel generic instructions. + // Legalization process could generate Target specific pseudo + // instructions with generic types. Don't record them + if (isPreISelGenericOpcode(MI.getOpcode())) { + if (isArtifact(MI)) + ArtifactList.insert(&MI); + else + InstList.insert(&MI); + } + } + + void createdInstr(MachineInstr &MI) override { + LLVM_DEBUG(NewMIs.push_back(&MI)); + createdOrChangedInstr(MI); + } + + void printNewInstrs() { + LLVM_DEBUG({ + for (const auto *MI : NewMIs) + dbgs() << ".. .. New MI: " << *MI; + NewMIs.clear(); + }); + } + + void erasingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << ".. .. Erasing: " << MI); + InstList.remove(&MI); + ArtifactList.remove(&MI); + } + + void changingInstr(MachineInstr &MI) override { + LLVM_DEBUG(dbgs() << ".. .. Changing MI: " << MI); + } + + void changedInstr(MachineInstr &MI) override { + // When insts change, we want to revisit them to legalize them again. + // We'll consider them the same as created. + LLVM_DEBUG(dbgs() << ".. .. Changed MI: " << MI); + createdOrChangedInstr(MI); + } +}; +} // namespace + +Legalizer::MFResult +Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI, + ArrayRef<GISelChangeObserver *> AuxObservers, + LostDebugLocObserver &LocObserver, + MachineIRBuilder &MIRBuilder) { + MIRBuilder.setMF(MF); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + // Populate worklists. + InstListTy InstList; + ArtifactListTy ArtifactList; + ReversePostOrderTraversal<MachineFunction *> RPOT(&MF); + // Perform legalization bottom up so we can DCE as we legalize. + // Traverse BB in RPOT and within each basic block, add insts top down, + // so when we pop_back_val in the legalization process, we traverse bottom-up. 
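To make the ordering concrete: GISelWorkList pops from the back, so filling it front-to-back per block in RPOT yields a bottom-up visit. A toy trace for one block with instructions A, B, C in program order (worklist semantics as assumed here):

  // deferred_insert(A); deferred_insert(B); deferred_insert(C); finalize();
  // pop_back_val() -> C, then B, then A   (uses before defs, so DCE can fire)
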
+ for (auto *MBB : RPOT) { + if (MBB->empty()) + continue; + for (MachineInstr &MI : *MBB) { + // Only legalize pre-isel generic instructions: others don't have types + // and are assumed to be legal. + if (!isPreISelGenericOpcode(MI.getOpcode())) + continue; + if (isArtifact(MI)) + ArtifactList.deferred_insert(&MI); + else + InstList.deferred_insert(&MI); + } + } + ArtifactList.finalize(); + InstList.finalize(); + + // This observer keeps the worklists updated. + LegalizerWorkListManager WorkListObserver(InstList, ArtifactList); + // We want both WorkListObserver as well as all the auxiliary observers (e.g. + // CSEInfo) to observe all changes. Use the wrapper observer. + GISelObserverWrapper WrapperObserver(&WorkListObserver); + for (GISelChangeObserver *Observer : AuxObservers) + WrapperObserver.addObserver(Observer); + + // Now install the observer as the delegate to MF. + // This will keep all the observers notified about new insertions/deletions. + RAIIMFObsDelInstaller Installer(MF, WrapperObserver); + LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder); + LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI); + auto RemoveDeadInstFromLists = [&WrapperObserver](MachineInstr *DeadMI) { + WrapperObserver.erasingInstr(*DeadMI); + }; + bool Changed = false; + SmallVector<MachineInstr *, 128> RetryList; + do { + LLVM_DEBUG(dbgs() << "=== New Iteration ===\n"); + assert(RetryList.empty() && "Expected no instructions in RetryList"); + unsigned NumArtifacts = ArtifactList.size(); + while (!InstList.empty()) { + MachineInstr &MI = *InstList.pop_back_val(); + assert(isPreISelGenericOpcode(MI.getOpcode()) && + "Expecting generic opcode"); + if (isTriviallyDead(MI, MRI)) { + LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n"); + MI.eraseFromParentAndMarkDBGValuesForRemoval(); + LocObserver.checkpoint(false); + continue; + } + + // Do the legalization for this instruction. + auto Res = Helper.legalizeInstrStep(MI); + // Error out if we couldn't legalize this instruction. We may want to + // fall back to DAG ISel instead in the future. + if (Res == LegalizerHelper::UnableToLegalize) { + // Move illegal artifacts to RetryList instead of aborting because + // legalizing InstList may generate artifacts that allow + // ArtifactCombiner to combine away them. + if (isArtifact(MI)) { + LLVM_DEBUG(dbgs() << ".. Not legalized, moving to artifacts retry\n"); + assert(NumArtifacts == 0 && + "Artifacts are only expected in instruction list starting the " + "second iteration, but each iteration starting second must " + "start with an empty artifacts list"); + (void)NumArtifacts; + RetryList.push_back(&MI); + continue; + } + Helper.MIRBuilder.stopObservingChanges(); + return {Changed, &MI}; + } + WorkListObserver.printNewInstrs(); + LocObserver.checkpoint(); + Changed |= Res == LegalizerHelper::Legalized; + } + // Try to combine the instructions in RetryList again if there + // are new artifacts. If not, stop legalizing. 
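Condensing the retry protocol implemented just below: an artifact that failed to legalize is parked on RetryList and re-queued only if this iteration produced new artifacts for the combiner to pair it with; otherwise the pass gives up on it:

  // !RetryList.empty() && !ArtifactList.empty()  -> requeue and retry
  // !RetryList.empty() &&  ArtifactList.empty()  -> fail on RetryList.front()
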
+ if (!RetryList.empty()) { + if (!ArtifactList.empty()) { + while (!RetryList.empty()) + ArtifactList.insert(RetryList.pop_back_val()); + } else { + LLVM_DEBUG(dbgs() << "No new artifacts created, not retrying!\n"); + Helper.MIRBuilder.stopObservingChanges(); + return {Changed, RetryList.front()}; + } + } + LocObserver.checkpoint(); + while (!ArtifactList.empty()) { + MachineInstr &MI = *ArtifactList.pop_back_val(); + assert(isPreISelGenericOpcode(MI.getOpcode()) && + "Expecting generic opcode"); + if (isTriviallyDead(MI, MRI)) { + LLVM_DEBUG(dbgs() << MI << "Is dead\n"); + RemoveDeadInstFromLists(&MI); + MI.eraseFromParentAndMarkDBGValuesForRemoval(); + LocObserver.checkpoint(false); + continue; + } + SmallVector<MachineInstr *, 4> DeadInstructions; + LLVM_DEBUG(dbgs() << "Trying to combine: " << MI); + if (ArtCombiner.tryCombineInstruction(MI, DeadInstructions, + WrapperObserver)) { + WorkListObserver.printNewInstrs(); + for (auto *DeadMI : DeadInstructions) { LLVM_DEBUG(dbgs() << "Is dead: " << *DeadMI); - RemoveDeadInstFromLists(DeadMI); - DeadMI->eraseFromParentAndMarkDBGValuesForRemoval(); - } - LocObserver.checkpoint( - VerifyDebugLocs == - DebugLocVerifyLevel::LegalizationsAndArtifactCombiners); - Changed = true; - continue; - } - // If this was not an artifact (that could be combined away), this might - // need special handling. Add it to InstList, so when it's processed - // there, it has to be legal or specially handled. - else { - LLVM_DEBUG(dbgs() << ".. Not combined, moving to instructions list\n"); - InstList.insert(&MI); - } - } - } while (!InstList.empty()); - - return {Changed, /*FailedOn*/ nullptr}; -} - -bool Legalizer::runOnMachineFunction(MachineFunction &MF) { - // If the ISel pipeline failed, do not bother running that pass. - if (MF.getProperties().hasProperty( - MachineFunctionProperties::Property::FailedISel)) - return false; - LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n'); - init(MF); - const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>(); - GISelCSEAnalysisWrapper &Wrapper = - getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); - MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); - - const size_t NumBlocks = MF.size(); - - std::unique_ptr<MachineIRBuilder> MIRBuilder; - GISelCSEInfo *CSEInfo = nullptr; - bool EnableCSE = EnableCSEInLegalizer.getNumOccurrences() - ? EnableCSEInLegalizer - : TPC.isGISelCSEEnabled(); - if (EnableCSE) { - MIRBuilder = std::make_unique<CSEMIRBuilder>(); - CSEInfo = &Wrapper.get(TPC.getCSEConfig()); - MIRBuilder->setCSEInfo(CSEInfo); - } else - MIRBuilder = std::make_unique<MachineIRBuilder>(); - - SmallVector<GISelChangeObserver *, 1> AuxObservers; - if (EnableCSE && CSEInfo) { - // We want CSEInfo in addition to WorkListObserver to observe all changes. - AuxObservers.push_back(CSEInfo); - } - assert(!CSEInfo || !errorToBool(CSEInfo->verify())); - LostDebugLocObserver LocObserver(DEBUG_TYPE); - if (VerifyDebugLocs > DebugLocVerifyLevel::None) - AuxObservers.push_back(&LocObserver); - - const LegalizerInfo &LI = *MF.getSubtarget().getLegalizerInfo(); - MFResult Result = - legalizeMachineFunction(MF, LI, AuxObservers, LocObserver, *MIRBuilder); - - if (Result.FailedOn) { - reportGISelFailure(MF, TPC, MORE, "gisel-legalize", - "unable to legalize instruction", *Result.FailedOn); - return false; - } - // For now don't support if new blocks are inserted - we would need to fix the - // outer loop for that. 
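The enable-cse-in-legalizer flag registered earlier makes this observer wiring easy to exercise from the command line; an illustrative invocation (input file is a placeholder):

  llc -global-isel -enable-cse-in-legalizer=true input.ll -o -
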
- if (MF.size() != NumBlocks) { - MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure", - MF.getFunction().getSubprogram(), - /*MBB=*/nullptr); - R << "inserting blocks is not supported yet"; - reportGISelFailure(MF, TPC, MORE, R); - return false; - } - - if (LocObserver.getNumLostDebugLocs()) { - MachineOptimizationRemarkMissed R("gisel-legalize", "LostDebugLoc", - MF.getFunction().getSubprogram(), - /*MBB=*/&*MF.begin()); - R << "lost " - << ore::NV("NumLostDebugLocs", LocObserver.getNumLostDebugLocs()) - << " debug locations during pass"; - reportGISelWarning(MF, TPC, MORE, R); - // Example remark: - // --- !Missed - // Pass: gisel-legalize - // Name: GISelFailure - // DebugLoc: { File: '.../legalize-urem.mir', Line: 1, Column: 0 } - // Function: test_urem_s32 - // Args: - // - String: 'lost ' - // - NumLostDebugLocs: '1' - // - String: ' debug locations during pass' - // ... - } - - // If for some reason CSE was not enabled, make sure that we invalidate the - // CSEInfo object (as we currently declare that the analysis is preserved). - // The next time get on the wrapper is called, it will force it to recompute - // the analysis. - if (!EnableCSE) - Wrapper.setComputed(false); - return Result.Changed; -} + RemoveDeadInstFromLists(DeadMI); + DeadMI->eraseFromParentAndMarkDBGValuesForRemoval(); + } + LocObserver.checkpoint( + VerifyDebugLocs == + DebugLocVerifyLevel::LegalizationsAndArtifactCombiners); + Changed = true; + continue; + } + // If this was not an artifact (that could be combined away), this might + // need special handling. Add it to InstList, so when it's processed + // there, it has to be legal or specially handled. + else { + LLVM_DEBUG(dbgs() << ".. Not combined, moving to instructions list\n"); + InstList.insert(&MI); + } + } + } while (!InstList.empty()); + + return {Changed, /*FailedOn*/ nullptr}; +} + +bool Legalizer::runOnMachineFunction(MachineFunction &MF) { + // If the ISel pipeline failed, do not bother running that pass. + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n'); + init(MF); + const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>(); + GISelCSEAnalysisWrapper &Wrapper = + getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); + MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr); + + const size_t NumBlocks = MF.size(); + + std::unique_ptr<MachineIRBuilder> MIRBuilder; + GISelCSEInfo *CSEInfo = nullptr; + bool EnableCSE = EnableCSEInLegalizer.getNumOccurrences() + ? EnableCSEInLegalizer + : TPC.isGISelCSEEnabled(); + if (EnableCSE) { + MIRBuilder = std::make_unique<CSEMIRBuilder>(); + CSEInfo = &Wrapper.get(TPC.getCSEConfig()); + MIRBuilder->setCSEInfo(CSEInfo); + } else + MIRBuilder = std::make_unique<MachineIRBuilder>(); + + SmallVector<GISelChangeObserver *, 1> AuxObservers; + if (EnableCSE && CSEInfo) { + // We want CSEInfo in addition to WorkListObserver to observe all changes. 
+ AuxObservers.push_back(CSEInfo); + } + assert(!CSEInfo || !errorToBool(CSEInfo->verify())); + LostDebugLocObserver LocObserver(DEBUG_TYPE); + if (VerifyDebugLocs > DebugLocVerifyLevel::None) + AuxObservers.push_back(&LocObserver); + + const LegalizerInfo &LI = *MF.getSubtarget().getLegalizerInfo(); + MFResult Result = + legalizeMachineFunction(MF, LI, AuxObservers, LocObserver, *MIRBuilder); + + if (Result.FailedOn) { + reportGISelFailure(MF, TPC, MORE, "gisel-legalize", + "unable to legalize instruction", *Result.FailedOn); + return false; + } + // For now don't support if new blocks are inserted - we would need to fix the + // outer loop for that. + if (MF.size() != NumBlocks) { + MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure", + MF.getFunction().getSubprogram(), + /*MBB=*/nullptr); + R << "inserting blocks is not supported yet"; + reportGISelFailure(MF, TPC, MORE, R); + return false; + } + + if (LocObserver.getNumLostDebugLocs()) { + MachineOptimizationRemarkMissed R("gisel-legalize", "LostDebugLoc", + MF.getFunction().getSubprogram(), + /*MBB=*/&*MF.begin()); + R << "lost " + << ore::NV("NumLostDebugLocs", LocObserver.getNumLostDebugLocs()) + << " debug locations during pass"; + reportGISelWarning(MF, TPC, MORE, R); + // Example remark: + // --- !Missed + // Pass: gisel-legalize + // Name: GISelFailure + // DebugLoc: { File: '.../legalize-urem.mir', Line: 1, Column: 0 } + // Function: test_urem_s32 + // Args: + // - String: 'lost ' + // - NumLostDebugLocs: '1' + // - String: ' debug locations during pass' + // ... + } + + // If for some reason CSE was not enabled, make sure that we invalidate the + // CSEInfo object (as we currently declare that the analysis is preserved). + // The next time get on the wrapper is called, it will force it to recompute + // the analysis. + if (!EnableCSE) + Wrapper.setComputed(false); + return Result.Changed; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index b446363ed4..66871ca3b9 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -1,389 +1,389 @@ -//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -/// \file This file implements the LegalizerHelper class to legalize -/// individual instructions and the LegalizeMachineIR wrapper pass for the -/// primary legalization. -// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" -#include "llvm/CodeGen/GlobalISel/CallLowering.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file This file implements the LegalizerHelper class to legalize +/// individual instructions and the LegalizeMachineIR wrapper pass for the +/// primary legalization. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" +#include "llvm/CodeGen/GlobalISel/CallLowering.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetFrameLowering.h" -#include "llvm/CodeGen/TargetInstrInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Support/raw_ostream.h" - -#define DEBUG_TYPE "legalizer" - -using namespace llvm; -using namespace LegalizeActions; +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" + +#define DEBUG_TYPE "legalizer" + +using namespace llvm; +using namespace LegalizeActions; using namespace MIPatternMatch; - -/// Try to break down \p OrigTy into \p NarrowTy sized pieces. -/// -/// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy, -/// with any leftover piece as type \p LeftoverTy -/// -/// Returns -1 in the first element of the pair if the breakdown is not -/// satisfiable. -static std::pair<int, int> -getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) { - assert(!LeftoverTy.isValid() && "this is an out argument"); - - unsigned Size = OrigTy.getSizeInBits(); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - unsigned NumParts = Size / NarrowSize; - unsigned LeftoverSize = Size - NumParts * NarrowSize; - assert(Size > NarrowSize); - - if (LeftoverSize == 0) - return {NumParts, 0}; - - if (NarrowTy.isVector()) { - unsigned EltSize = OrigTy.getScalarSizeInBits(); - if (LeftoverSize % EltSize != 0) - return {-1, -1}; - LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); - } else { - LeftoverTy = LLT::scalar(LeftoverSize); - } - - int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits(); - return std::make_pair(NumParts, NumLeftover); -} - -static Type *getFloatTypeForLLT(LLVMContext &Ctx, LLT Ty) { - - if (!Ty.isScalar()) - return nullptr; - - switch (Ty.getSizeInBits()) { - case 16: - return Type::getHalfTy(Ctx); - case 32: - return Type::getFloatTy(Ctx); - case 64: - return Type::getDoubleTy(Ctx); + +/// Try to break down \p OrigTy into \p NarrowTy sized pieces. +/// +/// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy, +/// with any leftover piece as type \p LeftoverTy +/// +/// Returns -1 in the first element of the pair if the breakdown is not +/// satisfiable. 
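As a worked instance of the breakdown that getNarrowTypeBreakDown (below) computes, here is a standalone sketch with plain integers standing in for LLT bit widths (breakDownScalar is a hypothetical helper, not LLVM API):

#include <cassert>
#include <utility>

// Scalar case only: 100 bits into 32-bit pieces gives {3, 1}, i.e. three s32
// parts plus one s4 leftover piece; an exact multiple like 96/32 gives {3, 0}.
std::pair<int, int> breakDownScalar(unsigned OrigSize, unsigned NarrowSize) {
  assert(OrigSize > NarrowSize);
  unsigned NumParts = OrigSize / NarrowSize;
  unsigned LeftoverSize = OrigSize - NumParts * NarrowSize;
  // For scalars the leftover is a single piece of LeftoverSize bits.
  return {int(NumParts), LeftoverSize ? 1 : 0};
}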
+static std::pair<int, int> +getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) { + assert(!LeftoverTy.isValid() && "this is an out argument"); + + unsigned Size = OrigTy.getSizeInBits(); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + unsigned NumParts = Size / NarrowSize; + unsigned LeftoverSize = Size - NumParts * NarrowSize; + assert(Size > NarrowSize); + + if (LeftoverSize == 0) + return {NumParts, 0}; + + if (NarrowTy.isVector()) { + unsigned EltSize = OrigTy.getScalarSizeInBits(); + if (LeftoverSize % EltSize != 0) + return {-1, -1}; + LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); + } else { + LeftoverTy = LLT::scalar(LeftoverSize); + } + + int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits(); + return std::make_pair(NumParts, NumLeftover); +} + +static Type *getFloatTypeForLLT(LLVMContext &Ctx, LLT Ty) { + + if (!Ty.isScalar()) + return nullptr; + + switch (Ty.getSizeInBits()) { + case 16: + return Type::getHalfTy(Ctx); + case 32: + return Type::getFloatTy(Ctx); + case 64: + return Type::getDoubleTy(Ctx); case 80: return Type::getX86_FP80Ty(Ctx); - case 128: - return Type::getFP128Ty(Ctx); - default: - return nullptr; - } -} - -LegalizerHelper::LegalizerHelper(MachineFunction &MF, - GISelChangeObserver &Observer, - MachineIRBuilder &Builder) - : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()), + case 128: + return Type::getFP128Ty(Ctx); + default: + return nullptr; + } +} + +LegalizerHelper::LegalizerHelper(MachineFunction &MF, + GISelChangeObserver &Observer, + MachineIRBuilder &Builder) + : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()), LI(*MF.getSubtarget().getLegalizerInfo()), TLI(*MF.getSubtarget().getTargetLowering()) { } - -LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI, - GISelChangeObserver &Observer, - MachineIRBuilder &B) + +LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI, + GISelChangeObserver &Observer, + MachineIRBuilder &B) : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI), TLI(*MF.getSubtarget().getTargetLowering()) { } -LegalizerHelper::LegalizeResult -LegalizerHelper::legalizeInstrStep(MachineInstr &MI) { - LLVM_DEBUG(dbgs() << "Legalizing: " << MI); - - MIRBuilder.setInstrAndDebugLoc(MI); - - if (MI.getOpcode() == TargetOpcode::G_INTRINSIC || - MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) - return LI.legalizeIntrinsic(*this, MI) ? Legalized : UnableToLegalize; - auto Step = LI.getAction(MI, MRI); - switch (Step.Action) { - case Legal: - LLVM_DEBUG(dbgs() << ".. Already legal\n"); - return AlreadyLegal; - case Libcall: - LLVM_DEBUG(dbgs() << ".. Convert to libcall\n"); - return libcall(MI); - case NarrowScalar: - LLVM_DEBUG(dbgs() << ".. Narrow scalar\n"); - return narrowScalar(MI, Step.TypeIdx, Step.NewType); - case WidenScalar: - LLVM_DEBUG(dbgs() << ".. Widen scalar\n"); - return widenScalar(MI, Step.TypeIdx, Step.NewType); - case Bitcast: - LLVM_DEBUG(dbgs() << ".. Bitcast type\n"); - return bitcast(MI, Step.TypeIdx, Step.NewType); - case Lower: - LLVM_DEBUG(dbgs() << ".. Lower\n"); - return lower(MI, Step.TypeIdx, Step.NewType); - case FewerElements: - LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n"); - return fewerElementsVector(MI, Step.TypeIdx, Step.NewType); - case MoreElements: - LLVM_DEBUG(dbgs() << ".. Increase number of elements\n"); - return moreElementsVector(MI, Step.TypeIdx, Step.NewType); - case Custom: - LLVM_DEBUG(dbgs() << ".. 
Custom legalization\n"); - return LI.legalizeCustom(*this, MI) ? Legalized : UnableToLegalize; - default: - LLVM_DEBUG(dbgs() << ".. Unable to legalize\n"); - return UnableToLegalize; - } -} - -void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts, - SmallVectorImpl<Register> &VRegs) { - for (int i = 0; i < NumParts; ++i) - VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); - MIRBuilder.buildUnmerge(VRegs, Reg); -} - -bool LegalizerHelper::extractParts(Register Reg, LLT RegTy, - LLT MainTy, LLT &LeftoverTy, - SmallVectorImpl<Register> &VRegs, - SmallVectorImpl<Register> &LeftoverRegs) { - assert(!LeftoverTy.isValid() && "this is an out argument"); - - unsigned RegSize = RegTy.getSizeInBits(); - unsigned MainSize = MainTy.getSizeInBits(); - unsigned NumParts = RegSize / MainSize; - unsigned LeftoverSize = RegSize - NumParts * MainSize; - - // Use an unmerge when possible. - if (LeftoverSize == 0) { - for (unsigned I = 0; I < NumParts; ++I) - VRegs.push_back(MRI.createGenericVirtualRegister(MainTy)); - MIRBuilder.buildUnmerge(VRegs, Reg); - return true; - } - - if (MainTy.isVector()) { - unsigned EltSize = MainTy.getScalarSizeInBits(); - if (LeftoverSize % EltSize != 0) - return false; - LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); - } else { - LeftoverTy = LLT::scalar(LeftoverSize); - } - - // For irregular sizes, extract the individual parts. - for (unsigned I = 0; I != NumParts; ++I) { - Register NewReg = MRI.createGenericVirtualRegister(MainTy); - VRegs.push_back(NewReg); - MIRBuilder.buildExtract(NewReg, Reg, MainSize * I); - } - - for (unsigned Offset = MainSize * NumParts; Offset < RegSize; - Offset += LeftoverSize) { - Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy); - LeftoverRegs.push_back(NewReg); - MIRBuilder.buildExtract(NewReg, Reg, Offset); - } - - return true; -} - -void LegalizerHelper::insertParts(Register DstReg, - LLT ResultTy, LLT PartTy, - ArrayRef<Register> PartRegs, - LLT LeftoverTy, - ArrayRef<Register> LeftoverRegs) { - if (!LeftoverTy.isValid()) { - assert(LeftoverRegs.empty()); - - if (!ResultTy.isVector()) { - MIRBuilder.buildMerge(DstReg, PartRegs); - return; - } - - if (PartTy.isVector()) - MIRBuilder.buildConcatVectors(DstReg, PartRegs); - else - MIRBuilder.buildBuildVector(DstReg, PartRegs); - return; - } - - unsigned PartSize = PartTy.getSizeInBits(); - unsigned LeftoverPartSize = LeftoverTy.getSizeInBits(); - - Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy); - MIRBuilder.buildUndef(CurResultReg); - - unsigned Offset = 0; - for (Register PartReg : PartRegs) { - Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy); - MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset); - CurResultReg = NewResultReg; - Offset += PartSize; - } - - for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) { - // Use the original output register for the final insert to avoid a copy. - Register NewResultReg = (I + 1 == E) ? - DstReg : MRI.createGenericVirtualRegister(ResultTy); - - MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset); - CurResultReg = NewResultReg; - Offset += LeftoverPartSize; - } -} - +LegalizerHelper::LegalizeResult +LegalizerHelper::legalizeInstrStep(MachineInstr &MI) { + LLVM_DEBUG(dbgs() << "Legalizing: " << MI); + + MIRBuilder.setInstrAndDebugLoc(MI); + + if (MI.getOpcode() == TargetOpcode::G_INTRINSIC || + MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) + return LI.legalizeIntrinsic(*this, MI) ? 
Legalized : UnableToLegalize; + auto Step = LI.getAction(MI, MRI); + switch (Step.Action) { + case Legal: + LLVM_DEBUG(dbgs() << ".. Already legal\n"); + return AlreadyLegal; + case Libcall: + LLVM_DEBUG(dbgs() << ".. Convert to libcall\n"); + return libcall(MI); + case NarrowScalar: + LLVM_DEBUG(dbgs() << ".. Narrow scalar\n"); + return narrowScalar(MI, Step.TypeIdx, Step.NewType); + case WidenScalar: + LLVM_DEBUG(dbgs() << ".. Widen scalar\n"); + return widenScalar(MI, Step.TypeIdx, Step.NewType); + case Bitcast: + LLVM_DEBUG(dbgs() << ".. Bitcast type\n"); + return bitcast(MI, Step.TypeIdx, Step.NewType); + case Lower: + LLVM_DEBUG(dbgs() << ".. Lower\n"); + return lower(MI, Step.TypeIdx, Step.NewType); + case FewerElements: + LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n"); + return fewerElementsVector(MI, Step.TypeIdx, Step.NewType); + case MoreElements: + LLVM_DEBUG(dbgs() << ".. Increase number of elements\n"); + return moreElementsVector(MI, Step.TypeIdx, Step.NewType); + case Custom: + LLVM_DEBUG(dbgs() << ".. Custom legalization\n"); + return LI.legalizeCustom(*this, MI) ? Legalized : UnableToLegalize; + default: + LLVM_DEBUG(dbgs() << ".. Unable to legalize\n"); + return UnableToLegalize; + } +} + +void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts, + SmallVectorImpl<Register> &VRegs) { + for (int i = 0; i < NumParts; ++i) + VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); + MIRBuilder.buildUnmerge(VRegs, Reg); +} + +bool LegalizerHelper::extractParts(Register Reg, LLT RegTy, + LLT MainTy, LLT &LeftoverTy, + SmallVectorImpl<Register> &VRegs, + SmallVectorImpl<Register> &LeftoverRegs) { + assert(!LeftoverTy.isValid() && "this is an out argument"); + + unsigned RegSize = RegTy.getSizeInBits(); + unsigned MainSize = MainTy.getSizeInBits(); + unsigned NumParts = RegSize / MainSize; + unsigned LeftoverSize = RegSize - NumParts * MainSize; + + // Use an unmerge when possible. + if (LeftoverSize == 0) { + for (unsigned I = 0; I < NumParts; ++I) + VRegs.push_back(MRI.createGenericVirtualRegister(MainTy)); + MIRBuilder.buildUnmerge(VRegs, Reg); + return true; + } + + if (MainTy.isVector()) { + unsigned EltSize = MainTy.getScalarSizeInBits(); + if (LeftoverSize % EltSize != 0) + return false; + LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); + } else { + LeftoverTy = LLT::scalar(LeftoverSize); + } + + // For irregular sizes, extract the individual parts. 
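To make the irregular split below concrete, here is a standalone model of the {offset, size} pairs the two loops generate. For a hypothetical 96-bit value with a 64-bit main type it yields {0, 64} and {64, 32}: one main G_EXTRACT plus one leftover G_EXTRACT.

#include <utility>
#include <vector>

// Offsets of the G_EXTRACTs emitted below: main parts first, then leftovers.
std::vector<std::pair<unsigned, unsigned>>
extractOffsets(unsigned RegSize, unsigned MainSize) {
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;
  std::vector<std::pair<unsigned, unsigned>> Pieces;
  for (unsigned I = 0; I != NumParts; ++I)
    Pieces.push_back({MainSize * I, MainSize});   // main pieces
  for (unsigned Off = MainSize * NumParts; Off < RegSize; Off += LeftoverSize)
    Pieces.push_back({Off, LeftoverSize});        // leftover pieces
  return Pieces;
}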
+ for (unsigned I = 0; I != NumParts; ++I) { + Register NewReg = MRI.createGenericVirtualRegister(MainTy); + VRegs.push_back(NewReg); + MIRBuilder.buildExtract(NewReg, Reg, MainSize * I); + } + + for (unsigned Offset = MainSize * NumParts; Offset < RegSize; + Offset += LeftoverSize) { + Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy); + LeftoverRegs.push_back(NewReg); + MIRBuilder.buildExtract(NewReg, Reg, Offset); + } + + return true; +} + +void LegalizerHelper::insertParts(Register DstReg, + LLT ResultTy, LLT PartTy, + ArrayRef<Register> PartRegs, + LLT LeftoverTy, + ArrayRef<Register> LeftoverRegs) { + if (!LeftoverTy.isValid()) { + assert(LeftoverRegs.empty()); + + if (!ResultTy.isVector()) { + MIRBuilder.buildMerge(DstReg, PartRegs); + return; + } + + if (PartTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, PartRegs); + else + MIRBuilder.buildBuildVector(DstReg, PartRegs); + return; + } + + unsigned PartSize = PartTy.getSizeInBits(); + unsigned LeftoverPartSize = LeftoverTy.getSizeInBits(); + + Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy); + MIRBuilder.buildUndef(CurResultReg); + + unsigned Offset = 0; + for (Register PartReg : PartRegs) { + Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy); + MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset); + CurResultReg = NewResultReg; + Offset += PartSize; + } + + for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) { + // Use the original output register for the final insert to avoid a copy. + Register NewResultReg = (I + 1 == E) ? + DstReg : MRI.createGenericVirtualRegister(ResultTy); + + MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset); + CurResultReg = NewResultReg; + Offset += LeftoverPartSize; + } +} + /// Append the result registers of G_UNMERGE_VALUES \p MI to \p Regs. -static void getUnmergeResults(SmallVectorImpl<Register> &Regs, - const MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES); - +static void getUnmergeResults(SmallVectorImpl<Register> &Regs, + const MachineInstr &MI) { + assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES); + const int StartIdx = Regs.size(); - const int NumResults = MI.getNumOperands() - 1; + const int NumResults = MI.getNumOperands() - 1; Regs.resize(Regs.size() + NumResults); - for (int I = 0; I != NumResults; ++I) + for (int I = 0; I != NumResults; ++I) Regs[StartIdx + I] = MI.getOperand(I).getReg(); -} - +} + void LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy, Register SrcReg) { - LLT SrcTy = MRI.getType(SrcReg); - if (SrcTy == GCDTy) { - // If the source already evenly divides the result type, we don't need to do - // anything. - Parts.push_back(SrcReg); - } else { - // Need to split into common type sized pieces. - auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); - getUnmergeResults(Parts, *Unmerge); - } + LLT SrcTy = MRI.getType(SrcReg); + if (SrcTy == GCDTy) { + // If the source already evenly divides the result type, we don't need to do + // anything. + Parts.push_back(SrcReg); + } else { + // Need to split into common type sized pieces. 
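The "common type" is the greatest common divisor of the widths in play, which is what guarantees the G_UNMERGE_VALUES below is exact. A standalone illustration with hypothetical widths:

#include <numeric>

// Narrowing an s48 value toward s32: the common piece is gcd(48, 32) = s16,
// so the source unmerges into exactly 48/16 = 3 pieces, and a 32-bit value
// can later be rebuilt from 32/16 = 2 such pieces.
static_assert(std::gcd(48, 32) == 16, "common piece width");
static_assert(48 % 16 == 0 && 32 % 16 == 0, "divides both widths exactly");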
+ auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); + getUnmergeResults(Parts, *Unmerge); + } } - + LLT LegalizerHelper::extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy, LLT NarrowTy, Register SrcReg) { LLT SrcTy = MRI.getType(SrcReg); LLT GCDTy = getGCDType(getGCDType(SrcTy, NarrowTy), DstTy); extractGCDType(Parts, GCDTy, SrcReg); - return GCDTy; -} - -LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy, - SmallVectorImpl<Register> &VRegs, - unsigned PadStrategy) { - LLT LCMTy = getLCMType(DstTy, NarrowTy); - - int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits(); - int NumSubParts = NarrowTy.getSizeInBits() / GCDTy.getSizeInBits(); - int NumOrigSrc = VRegs.size(); - - Register PadReg; - - // Get a value we can use to pad the source value if the sources won't evenly - // cover the result type. - if (NumOrigSrc < NumParts * NumSubParts) { - if (PadStrategy == TargetOpcode::G_ZEXT) - PadReg = MIRBuilder.buildConstant(GCDTy, 0).getReg(0); - else if (PadStrategy == TargetOpcode::G_ANYEXT) - PadReg = MIRBuilder.buildUndef(GCDTy).getReg(0); - else { - assert(PadStrategy == TargetOpcode::G_SEXT); - - // Shift the sign bit of the low register through the high register. - auto ShiftAmt = - MIRBuilder.buildConstant(LLT::scalar(64), GCDTy.getSizeInBits() - 1); - PadReg = MIRBuilder.buildAShr(GCDTy, VRegs.back(), ShiftAmt).getReg(0); - } - } - - // Registers for the final merge to be produced. - SmallVector<Register, 4> Remerge(NumParts); - - // Registers needed for intermediate merges, which will be merged into a - // source for Remerge. - SmallVector<Register, 4> SubMerge(NumSubParts); - - // Once we've fully read off the end of the original source bits, we can reuse - // the same high bits for remaining padding elements. - Register AllPadReg; - - // Build merges to the LCM type to cover the original result type. - for (int I = 0; I != NumParts; ++I) { - bool AllMergePartsArePadding = true; - - // Build the requested merges to the requested type. - for (int J = 0; J != NumSubParts; ++J) { - int Idx = I * NumSubParts + J; - if (Idx >= NumOrigSrc) { - SubMerge[J] = PadReg; - continue; - } - - SubMerge[J] = VRegs[Idx]; - - // There are meaningful bits here we can't reuse later. - AllMergePartsArePadding = false; - } - - // If we've filled up a complete piece with padding bits, we can directly - // emit the natural sized constant if applicable, rather than a merge of - // smaller constants. - if (AllMergePartsArePadding && !AllPadReg) { - if (PadStrategy == TargetOpcode::G_ANYEXT) - AllPadReg = MIRBuilder.buildUndef(NarrowTy).getReg(0); - else if (PadStrategy == TargetOpcode::G_ZEXT) - AllPadReg = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0); - - // If this is a sign extension, we can't materialize a trivial constant - // with the right type and have to produce a merge. - } - - if (AllPadReg) { - // Avoid creating additional instructions if we're just adding additional - // copies of padding bits. - Remerge[I] = AllPadReg; - continue; - } - - if (NumSubParts == 1) - Remerge[I] = SubMerge[0]; - else - Remerge[I] = MIRBuilder.buildMerge(NarrowTy, SubMerge).getReg(0); - - // In the sign extend padding case, re-use the first all-signbit merge. 
- if (AllMergePartsArePadding && !AllPadReg) - AllPadReg = Remerge[I]; - } - - VRegs = std::move(Remerge); - return LCMTy; -} - -void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy, - ArrayRef<Register> RemergeRegs) { - LLT DstTy = MRI.getType(DstReg); - - // Create the merge to the widened source, and extract the relevant bits into - // the result. - - if (DstTy == LCMTy) { - MIRBuilder.buildMerge(DstReg, RemergeRegs); - return; - } - - auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs); - if (DstTy.isScalar() && LCMTy.isScalar()) { - MIRBuilder.buildTrunc(DstReg, Remerge); - return; - } - - if (LCMTy.isVector()) { + return GCDTy; +} + +LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy, + SmallVectorImpl<Register> &VRegs, + unsigned PadStrategy) { + LLT LCMTy = getLCMType(DstTy, NarrowTy); + + int NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits(); + int NumSubParts = NarrowTy.getSizeInBits() / GCDTy.getSizeInBits(); + int NumOrigSrc = VRegs.size(); + + Register PadReg; + + // Get a value we can use to pad the source value if the sources won't evenly + // cover the result type. + if (NumOrigSrc < NumParts * NumSubParts) { + if (PadStrategy == TargetOpcode::G_ZEXT) + PadReg = MIRBuilder.buildConstant(GCDTy, 0).getReg(0); + else if (PadStrategy == TargetOpcode::G_ANYEXT) + PadReg = MIRBuilder.buildUndef(GCDTy).getReg(0); + else { + assert(PadStrategy == TargetOpcode::G_SEXT); + + // Shift the sign bit of the low register through the high register. + auto ShiftAmt = + MIRBuilder.buildConstant(LLT::scalar(64), GCDTy.getSizeInBits() - 1); + PadReg = MIRBuilder.buildAShr(GCDTy, VRegs.back(), ShiftAmt).getReg(0); + } + } + + // Registers for the final merge to be produced. + SmallVector<Register, 4> Remerge(NumParts); + + // Registers needed for intermediate merges, which will be merged into a + // source for Remerge. + SmallVector<Register, 4> SubMerge(NumSubParts); + + // Once we've fully read off the end of the original source bits, we can reuse + // the same high bits for remaining padding elements. + Register AllPadReg; + + // Build merges to the LCM type to cover the original result type. + for (int I = 0; I != NumParts; ++I) { + bool AllMergePartsArePadding = true; + + // Build the requested merges to the requested type. + for (int J = 0; J != NumSubParts; ++J) { + int Idx = I * NumSubParts + J; + if (Idx >= NumOrigSrc) { + SubMerge[J] = PadReg; + continue; + } + + SubMerge[J] = VRegs[Idx]; + + // There are meaningful bits here we can't reuse later. + AllMergePartsArePadding = false; + } + + // If we've filled up a complete piece with padding bits, we can directly + // emit the natural sized constant if applicable, rather than a merge of + // smaller constants. + if (AllMergePartsArePadding && !AllPadReg) { + if (PadStrategy == TargetOpcode::G_ANYEXT) + AllPadReg = MIRBuilder.buildUndef(NarrowTy).getReg(0); + else if (PadStrategy == TargetOpcode::G_ZEXT) + AllPadReg = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0); + + // If this is a sign extension, we can't materialize a trivial constant + // with the right type and have to produce a merge. + } + + if (AllPadReg) { + // Avoid creating additional instructions if we're just adding additional + // copies of padding bits. + Remerge[I] = AllPadReg; + continue; + } + + if (NumSubParts == 1) + Remerge[I] = SubMerge[0]; + else + Remerge[I] = MIRBuilder.buildMerge(NarrowTy, SubMerge).getReg(0); + + // In the sign extend padding case, re-use the first all-signbit merge. 
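Why the G_SEXT pad is an arithmetic shift by width-1: it turns the top piece into an all-sign-bits value, which is exactly what every padding piece of a sign extension must hold. A standalone worked example with hypothetical 16-bit pieces (G_ZEXT padding would use 0 instead, and G_ANYEXT an undef):

#include <cassert>
#include <cstdint>

int main() {
  int16_t Top = -32768;                // highest existing piece, sign bit set
  int16_t Pad = int16_t(Top >> 15);    // shift by width-1 replicates the sign
  assert(Pad == -1);                   // all ones: correct sign-extension pad
  int16_t TopPos = 0x1234;             // non-negative top piece
  assert(int16_t(TopPos >> 15) == 0);  // pad is all zeros, as required
  return 0;
}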
+ if (AllMergePartsArePadding && !AllPadReg) + AllPadReg = Remerge[I]; + } + + VRegs = std::move(Remerge); + return LCMTy; +} + +void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy, + ArrayRef<Register> RemergeRegs) { + LLT DstTy = MRI.getType(DstReg); + + // Create the merge to the widened source, and extract the relevant bits into + // the result. + + if (DstTy == LCMTy) { + MIRBuilder.buildMerge(DstReg, RemergeRegs); + return; + } + + auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs); + if (DstTy.isScalar() && LCMTy.isScalar()) { + MIRBuilder.buildTrunc(DstReg, Remerge); + return; + } + + if (LCMTy.isVector()) { unsigned NumDefs = LCMTy.getSizeInBits() / DstTy.getSizeInBits(); SmallVector<Register, 8> UnmergeDefs(NumDefs); UnmergeDefs[0] = DstReg; @@ -392,13 +392,13 @@ void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy, MIRBuilder.buildUnmerge(UnmergeDefs, MIRBuilder.buildMerge(LCMTy, RemergeRegs)); - return; - } - - llvm_unreachable("unhandled case"); -} - -static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { + return; + } + + llvm_unreachable("unhandled case"); +} + +static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { #define RTLIBCASE_INT(LibcallPrefix) \ do { \ switch (Size) { \ @@ -413,850 +413,850 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { } \ } while (0) -#define RTLIBCASE(LibcallPrefix) \ - do { \ - switch (Size) { \ - case 32: \ - return RTLIB::LibcallPrefix##32; \ - case 64: \ - return RTLIB::LibcallPrefix##64; \ +#define RTLIBCASE(LibcallPrefix) \ + do { \ + switch (Size) { \ + case 32: \ + return RTLIB::LibcallPrefix##32; \ + case 64: \ + return RTLIB::LibcallPrefix##64; \ case 80: \ return RTLIB::LibcallPrefix##80; \ - case 128: \ - return RTLIB::LibcallPrefix##128; \ - default: \ - llvm_unreachable("unexpected size"); \ - } \ - } while (0) - - switch (Opcode) { - case TargetOpcode::G_SDIV: + case 128: \ + return RTLIB::LibcallPrefix##128; \ + default: \ + llvm_unreachable("unexpected size"); \ + } \ + } while (0) + + switch (Opcode) { + case TargetOpcode::G_SDIV: RTLIBCASE_INT(SDIV_I); - case TargetOpcode::G_UDIV: + case TargetOpcode::G_UDIV: RTLIBCASE_INT(UDIV_I); - case TargetOpcode::G_SREM: + case TargetOpcode::G_SREM: RTLIBCASE_INT(SREM_I); - case TargetOpcode::G_UREM: + case TargetOpcode::G_UREM: RTLIBCASE_INT(UREM_I); - case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: RTLIBCASE_INT(CTLZ_I); - case TargetOpcode::G_FADD: - RTLIBCASE(ADD_F); - case TargetOpcode::G_FSUB: - RTLIBCASE(SUB_F); - case TargetOpcode::G_FMUL: - RTLIBCASE(MUL_F); - case TargetOpcode::G_FDIV: - RTLIBCASE(DIV_F); - case TargetOpcode::G_FEXP: - RTLIBCASE(EXP_F); - case TargetOpcode::G_FEXP2: - RTLIBCASE(EXP2_F); - case TargetOpcode::G_FREM: - RTLIBCASE(REM_F); - case TargetOpcode::G_FPOW: - RTLIBCASE(POW_F); - case TargetOpcode::G_FMA: - RTLIBCASE(FMA_F); - case TargetOpcode::G_FSIN: - RTLIBCASE(SIN_F); - case TargetOpcode::G_FCOS: - RTLIBCASE(COS_F); - case TargetOpcode::G_FLOG10: - RTLIBCASE(LOG10_F); - case TargetOpcode::G_FLOG: - RTLIBCASE(LOG_F); - case TargetOpcode::G_FLOG2: - RTLIBCASE(LOG2_F); - case TargetOpcode::G_FCEIL: - RTLIBCASE(CEIL_F); - case TargetOpcode::G_FFLOOR: - RTLIBCASE(FLOOR_F); - case TargetOpcode::G_FMINNUM: - RTLIBCASE(FMIN_F); - case TargetOpcode::G_FMAXNUM: - RTLIBCASE(FMAX_F); - case TargetOpcode::G_FSQRT: - RTLIBCASE(SQRT_F); - case TargetOpcode::G_FRINT: - RTLIBCASE(RINT_F); - case TargetOpcode::G_FNEARBYINT: - 
RTLIBCASE(NEARBYINT_F); + case TargetOpcode::G_FADD: + RTLIBCASE(ADD_F); + case TargetOpcode::G_FSUB: + RTLIBCASE(SUB_F); + case TargetOpcode::G_FMUL: + RTLIBCASE(MUL_F); + case TargetOpcode::G_FDIV: + RTLIBCASE(DIV_F); + case TargetOpcode::G_FEXP: + RTLIBCASE(EXP_F); + case TargetOpcode::G_FEXP2: + RTLIBCASE(EXP2_F); + case TargetOpcode::G_FREM: + RTLIBCASE(REM_F); + case TargetOpcode::G_FPOW: + RTLIBCASE(POW_F); + case TargetOpcode::G_FMA: + RTLIBCASE(FMA_F); + case TargetOpcode::G_FSIN: + RTLIBCASE(SIN_F); + case TargetOpcode::G_FCOS: + RTLIBCASE(COS_F); + case TargetOpcode::G_FLOG10: + RTLIBCASE(LOG10_F); + case TargetOpcode::G_FLOG: + RTLIBCASE(LOG_F); + case TargetOpcode::G_FLOG2: + RTLIBCASE(LOG2_F); + case TargetOpcode::G_FCEIL: + RTLIBCASE(CEIL_F); + case TargetOpcode::G_FFLOOR: + RTLIBCASE(FLOOR_F); + case TargetOpcode::G_FMINNUM: + RTLIBCASE(FMIN_F); + case TargetOpcode::G_FMAXNUM: + RTLIBCASE(FMAX_F); + case TargetOpcode::G_FSQRT: + RTLIBCASE(SQRT_F); + case TargetOpcode::G_FRINT: + RTLIBCASE(RINT_F); + case TargetOpcode::G_FNEARBYINT: + RTLIBCASE(NEARBYINT_F); case TargetOpcode::G_INTRINSIC_ROUNDEVEN: RTLIBCASE(ROUNDEVEN_F); - } - llvm_unreachable("Unknown libcall function"); -} - -/// True if an instruction is in tail position in its caller. Intended for -/// legalizing libcalls as tail calls when possible. + } + llvm_unreachable("Unknown libcall function"); +} + +/// True if an instruction is in tail position in its caller. Intended for +/// legalizing libcalls as tail calls when possible. static bool isLibCallInTailPosition(const TargetInstrInfo &TII, MachineInstr &MI) { - MachineBasicBlock &MBB = *MI.getParent(); - const Function &F = MBB.getParent()->getFunction(); - - // Conservatively require the attributes of the call to match those of - // the return. Ignore NoAlias and NonNull because they don't affect the - // call sequence. - AttributeList CallerAttrs = F.getAttributes(); - if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex) - .removeAttribute(Attribute::NoAlias) - .removeAttribute(Attribute::NonNull) - .hasAttributes()) - return false; - - // It's not safe to eliminate the sign / zero extension of the return value. - if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) || - CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt)) - return false; - - // Only tail call if the following instruction is a standard return. 
- auto Next = next_nodbg(MI.getIterator(), MBB.instr_end()); - if (Next == MBB.instr_end() || TII.isTailCall(*Next) || !Next->isReturn()) - return false; - - return true; -} - -LegalizerHelper::LegalizeResult -llvm::createLibcall(MachineIRBuilder &MIRBuilder, const char *Name, - const CallLowering::ArgInfo &Result, - ArrayRef<CallLowering::ArgInfo> Args, - const CallingConv::ID CC) { - auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); - - CallLowering::CallLoweringInfo Info; - Info.CallConv = CC; - Info.Callee = MachineOperand::CreateES(Name); - Info.OrigRet = Result; - std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); - if (!CLI.lowerCall(MIRBuilder, Info)) - return LegalizerHelper::UnableToLegalize; - - return LegalizerHelper::Legalized; -} - -LegalizerHelper::LegalizeResult -llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall, - const CallLowering::ArgInfo &Result, - ArrayRef<CallLowering::ArgInfo> Args) { - auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); - const char *Name = TLI.getLibcallName(Libcall); - const CallingConv::ID CC = TLI.getLibcallCallingConv(Libcall); - return createLibcall(MIRBuilder, Name, Result, Args, CC); -} - -// Useful for libcalls where all operands have the same type. -static LegalizerHelper::LegalizeResult -simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, - Type *OpType) { - auto Libcall = getRTLibDesc(MI.getOpcode(), Size); - - SmallVector<CallLowering::ArgInfo, 3> Args; - for (unsigned i = 1; i < MI.getNumOperands(); i++) - Args.push_back({MI.getOperand(i).getReg(), OpType}); - return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType}, - Args); -} - -LegalizerHelper::LegalizeResult -llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, - MachineInstr &MI) { - auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); - - SmallVector<CallLowering::ArgInfo, 3> Args; - // Add all the args, except for the last which is an imm denoting 'tail'. + MachineBasicBlock &MBB = *MI.getParent(); + const Function &F = MBB.getParent()->getFunction(); + + // Conservatively require the attributes of the call to match those of + // the return. Ignore NoAlias and NonNull because they don't affect the + // call sequence. + AttributeList CallerAttrs = F.getAttributes(); + if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex) + .removeAttribute(Attribute::NoAlias) + .removeAttribute(Attribute::NonNull) + .hasAttributes()) + return false; + + // It's not safe to eliminate the sign / zero extension of the return value. + if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) || + CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt)) + return false; + + // Only tail call if the following instruction is a standard return. 
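The shape this scan accepts is "libcall; RET", tolerating interleaved debug instructions. A standalone sketch of just that control flow, with a hypothetical Instr struct standing in for the MachineInstr predicates (isDebugInstr, isReturn, isTailCall):

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the MachineInstr queries used by the scan.
struct Instr { bool IsDebug = false, IsReturn = false, IsTailCall = false; };

// True only if the first non-debug instruction after Pos is a plain return,
// mirroring the next_nodbg walk: no following return, or a tail call, fails.
bool inTailPosition(const std::vector<Instr> &Block, std::size_t Pos) {
  for (std::size_t I = Pos + 1; I < Block.size(); ++I) {
    if (Block[I].IsDebug)
      continue;                                  // skipped by next_nodbg
    return Block[I].IsReturn && !Block[I].IsTailCall;
  }
  return false;                                  // fell off the block
}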
+ auto Next = next_nodbg(MI.getIterator(), MBB.instr_end()); + if (Next == MBB.instr_end() || TII.isTailCall(*Next) || !Next->isReturn()) + return false; + + return true; +} + +LegalizerHelper::LegalizeResult +llvm::createLibcall(MachineIRBuilder &MIRBuilder, const char *Name, + const CallLowering::ArgInfo &Result, + ArrayRef<CallLowering::ArgInfo> Args, + const CallingConv::ID CC) { + auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); + + CallLowering::CallLoweringInfo Info; + Info.CallConv = CC; + Info.Callee = MachineOperand::CreateES(Name); + Info.OrigRet = Result; + std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); + if (!CLI.lowerCall(MIRBuilder, Info)) + return LegalizerHelper::UnableToLegalize; + + return LegalizerHelper::Legalized; +} + +LegalizerHelper::LegalizeResult +llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall, + const CallLowering::ArgInfo &Result, + ArrayRef<CallLowering::ArgInfo> Args) { + auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); + const char *Name = TLI.getLibcallName(Libcall); + const CallingConv::ID CC = TLI.getLibcallCallingConv(Libcall); + return createLibcall(MIRBuilder, Name, Result, Args, CC); +} + +// Useful for libcalls where all operands have the same type. +static LegalizerHelper::LegalizeResult +simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, + Type *OpType) { + auto Libcall = getRTLibDesc(MI.getOpcode(), Size); + + SmallVector<CallLowering::ArgInfo, 3> Args; + for (unsigned i = 1; i < MI.getNumOperands(); i++) + Args.push_back({MI.getOperand(i).getReg(), OpType}); + return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType}, + Args); +} + +LegalizerHelper::LegalizeResult +llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, + MachineInstr &MI) { + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); + + SmallVector<CallLowering::ArgInfo, 3> Args; + // Add all the args, except for the last which is an imm denoting 'tail'. for (unsigned i = 0; i < MI.getNumOperands() - 1; ++i) { - Register Reg = MI.getOperand(i).getReg(); - - // Need derive an IR type for call lowering. - LLT OpLLT = MRI.getType(Reg); - Type *OpTy = nullptr; - if (OpLLT.isPointer()) - OpTy = Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace()); - else - OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits()); - Args.push_back({Reg, OpTy}); - } - - auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); - auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); - RTLIB::Libcall RTLibcall; + Register Reg = MI.getOperand(i).getReg(); + + // Need derive an IR type for call lowering. 
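For the argument mapping right below: call lowering wants IR types, and the rule is simply that pointer LLTs become i8* in the matching address space while everything else becomes an integer of the same bit width (so p0 maps to i8* and s64 to i64). Restated from the hunk, not standalone:

Type *OpTy = OpLLT.isPointer()
                 ? Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace()) // p<N>
                 : IntegerType::get(Ctx, OpLLT.getSizeInBits());    // sN -> iN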
+ LLT OpLLT = MRI.getType(Reg); + Type *OpTy = nullptr; + if (OpLLT.isPointer()) + OpTy = Type::getInt8PtrTy(Ctx, OpLLT.getAddressSpace()); + else + OpTy = IntegerType::get(Ctx, OpLLT.getSizeInBits()); + Args.push_back({Reg, OpTy}); + } + + auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); + auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); + RTLIB::Libcall RTLibcall; switch (MI.getOpcode()) { case TargetOpcode::G_MEMCPY: - RTLibcall = RTLIB::MEMCPY; - break; + RTLibcall = RTLIB::MEMCPY; + break; case TargetOpcode::G_MEMMOVE: RTLibcall = RTLIB::MEMMOVE; break; case TargetOpcode::G_MEMSET: - RTLibcall = RTLIB::MEMSET; - break; - default: - return LegalizerHelper::UnableToLegalize; - } - const char *Name = TLI.getLibcallName(RTLibcall); - - CallLowering::CallLoweringInfo Info; - Info.CallConv = TLI.getLibcallCallingConv(RTLibcall); - Info.Callee = MachineOperand::CreateES(Name); - Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx)); + RTLibcall = RTLIB::MEMSET; + break; + default: + return LegalizerHelper::UnableToLegalize; + } + const char *Name = TLI.getLibcallName(RTLibcall); + + CallLowering::CallLoweringInfo Info; + Info.CallConv = TLI.getLibcallCallingConv(RTLibcall); + Info.Callee = MachineOperand::CreateES(Name); + Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx)); Info.IsTailCall = MI.getOperand(MI.getNumOperands() - 1).getImm() && isLibCallInTailPosition(MIRBuilder.getTII(), MI); - - std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); - if (!CLI.lowerCall(MIRBuilder, Info)) - return LegalizerHelper::UnableToLegalize; - - if (Info.LoweredTailCall) { - assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?"); - // We must have a return following the call (or debug insts) to get past - // isLibCallInTailPosition. - do { - MachineInstr *Next = MI.getNextNode(); - assert(Next && (Next->isReturn() || Next->isDebugInstr()) && - "Expected instr following MI to be return or debug inst?"); - // We lowered a tail call, so the call is now the return from the block. - // Delete the old return. 
- Next->eraseFromParent(); - } while (MI.getNextNode()); - } - - return LegalizerHelper::Legalized; -} - -static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType, - Type *FromType) { - auto ToMVT = MVT::getVT(ToType); - auto FromMVT = MVT::getVT(FromType); - - switch (Opcode) { - case TargetOpcode::G_FPEXT: - return RTLIB::getFPEXT(FromMVT, ToMVT); - case TargetOpcode::G_FPTRUNC: - return RTLIB::getFPROUND(FromMVT, ToMVT); - case TargetOpcode::G_FPTOSI: - return RTLIB::getFPTOSINT(FromMVT, ToMVT); - case TargetOpcode::G_FPTOUI: - return RTLIB::getFPTOUINT(FromMVT, ToMVT); - case TargetOpcode::G_SITOFP: - return RTLIB::getSINTTOFP(FromMVT, ToMVT); - case TargetOpcode::G_UITOFP: - return RTLIB::getUINTTOFP(FromMVT, ToMVT); - } - llvm_unreachable("Unsupported libcall function"); -} - -static LegalizerHelper::LegalizeResult -conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType, - Type *FromType) { - RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType); - return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType}, - {{MI.getOperand(1).getReg(), FromType}}); -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::libcall(MachineInstr &MI) { - LLT LLTy = MRI.getType(MI.getOperand(0).getReg()); - unsigned Size = LLTy.getSizeInBits(); - auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); - - switch (MI.getOpcode()) { - default: - return UnableToLegalize; - case TargetOpcode::G_SDIV: - case TargetOpcode::G_UDIV: - case TargetOpcode::G_SREM: - case TargetOpcode::G_UREM: - case TargetOpcode::G_CTLZ_ZERO_UNDEF: { - Type *HLTy = IntegerType::get(Ctx, Size); - auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); - if (Status != Legalized) - return Status; - break; - } - case TargetOpcode::G_FADD: - case TargetOpcode::G_FSUB: - case TargetOpcode::G_FMUL: - case TargetOpcode::G_FDIV: - case TargetOpcode::G_FMA: - case TargetOpcode::G_FPOW: - case TargetOpcode::G_FREM: - case TargetOpcode::G_FCOS: - case TargetOpcode::G_FSIN: - case TargetOpcode::G_FLOG10: - case TargetOpcode::G_FLOG: - case TargetOpcode::G_FLOG2: - case TargetOpcode::G_FEXP: - case TargetOpcode::G_FEXP2: - case TargetOpcode::G_FCEIL: - case TargetOpcode::G_FFLOOR: - case TargetOpcode::G_FMINNUM: - case TargetOpcode::G_FMAXNUM: - case TargetOpcode::G_FSQRT: - case TargetOpcode::G_FRINT: + + std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs)); + if (!CLI.lowerCall(MIRBuilder, Info)) + return LegalizerHelper::UnableToLegalize; + + if (Info.LoweredTailCall) { + assert(Info.IsTailCall && "Lowered tail call when it wasn't a tail call?"); + // We must have a return following the call (or debug insts) to get past + // isLibCallInTailPosition. + do { + MachineInstr *Next = MI.getNextNode(); + assert(Next && (Next->isReturn() || Next->isDebugInstr()) && + "Expected instr following MI to be return or debug inst?"); + // We lowered a tail call, so the call is now the return from the block. + // Delete the old return. 
+ Next->eraseFromParent(); + } while (MI.getNextNode()); + } + + return LegalizerHelper::Legalized; +} + +static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType, + Type *FromType) { + auto ToMVT = MVT::getVT(ToType); + auto FromMVT = MVT::getVT(FromType); + + switch (Opcode) { + case TargetOpcode::G_FPEXT: + return RTLIB::getFPEXT(FromMVT, ToMVT); + case TargetOpcode::G_FPTRUNC: + return RTLIB::getFPROUND(FromMVT, ToMVT); + case TargetOpcode::G_FPTOSI: + return RTLIB::getFPTOSINT(FromMVT, ToMVT); + case TargetOpcode::G_FPTOUI: + return RTLIB::getFPTOUINT(FromMVT, ToMVT); + case TargetOpcode::G_SITOFP: + return RTLIB::getSINTTOFP(FromMVT, ToMVT); + case TargetOpcode::G_UITOFP: + return RTLIB::getUINTTOFP(FromMVT, ToMVT); + } + llvm_unreachable("Unsupported libcall function"); +} + +static LegalizerHelper::LegalizeResult +conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType, + Type *FromType) { + RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType); + return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType}, + {{MI.getOperand(1).getReg(), FromType}}); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::libcall(MachineInstr &MI) { + LLT LLTy = MRI.getType(MI.getOperand(0).getReg()); + unsigned Size = LLTy.getSizeInBits(); + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); + + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SREM: + case TargetOpcode::G_UREM: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: { + Type *HLTy = IntegerType::get(Ctx, Size); + auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FADD: + case TargetOpcode::G_FSUB: + case TargetOpcode::G_FMUL: + case TargetOpcode::G_FDIV: + case TargetOpcode::G_FMA: + case TargetOpcode::G_FPOW: + case TargetOpcode::G_FREM: + case TargetOpcode::G_FCOS: + case TargetOpcode::G_FSIN: + case TargetOpcode::G_FLOG10: + case TargetOpcode::G_FLOG: + case TargetOpcode::G_FLOG2: + case TargetOpcode::G_FEXP: + case TargetOpcode::G_FEXP2: + case TargetOpcode::G_FCEIL: + case TargetOpcode::G_FFLOOR: + case TargetOpcode::G_FMINNUM: + case TargetOpcode::G_FMAXNUM: + case TargetOpcode::G_FSQRT: + case TargetOpcode::G_FRINT: case TargetOpcode::G_FNEARBYINT: case TargetOpcode::G_INTRINSIC_ROUNDEVEN: { - Type *HLTy = getFloatTypeForLLT(Ctx, LLTy); + Type *HLTy = getFloatTypeForLLT(Ctx, LLTy); if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) { LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n"); - return UnableToLegalize; - } - auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); - if (Status != Legalized) - return Status; - break; - } - case TargetOpcode::G_FPEXT: - case TargetOpcode::G_FPTRUNC: { - Type *FromTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg())); - Type *ToTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg())); - if (!FromTy || !ToTy) - return UnableToLegalize; - LegalizeResult Status = conversionLibcall(MI, MIRBuilder, ToTy, FromTy ); - if (Status != Legalized) - return Status; - break; - } - case TargetOpcode::G_FPTOSI: - case TargetOpcode::G_FPTOUI: { - // FIXME: Support other types - unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); - unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && 
FromSize != 64)) - return UnableToLegalize; - LegalizeResult Status = conversionLibcall( - MI, MIRBuilder, - ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx), - FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx)); - if (Status != Legalized) - return Status; - break; - } - case TargetOpcode::G_SITOFP: - case TargetOpcode::G_UITOFP: { - // FIXME: Support other types - unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); - unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64)) - return UnableToLegalize; - LegalizeResult Status = conversionLibcall( - MI, MIRBuilder, - ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx), - FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx)); - if (Status != Legalized) - return Status; - break; - } + return UnableToLegalize; + } + auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FPEXT: + case TargetOpcode::G_FPTRUNC: { + Type *FromTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(1).getReg())); + Type *ToTy = getFloatTypeForLLT(Ctx, MRI.getType(MI.getOperand(0).getReg())); + if (!FromTy || !ToTy) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall(MI, MIRBuilder, ToTy, FromTy ); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_FPTOSI: + case TargetOpcode::G_FPTOUI: { + // FIXME: Support other types + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && FromSize != 64)) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, + ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx), + FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx)); + if (Status != Legalized) + return Status; + break; + } + case TargetOpcode::G_SITOFP: + case TargetOpcode::G_UITOFP: { + // FIXME: Support other types + unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64)) + return UnableToLegalize; + LegalizeResult Status = conversionLibcall( + MI, MIRBuilder, + ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx), + FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx)); + if (Status != Legalized) + return Status; + break; + } case TargetOpcode::G_MEMCPY: case TargetOpcode::G_MEMMOVE: case TargetOpcode::G_MEMSET: { LegalizeResult Result = createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI); MI.eraseFromParent(); return Result; - } - } - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, - unsigned TypeIdx, - LLT NarrowTy) { - uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - uint64_t NarrowSize = NarrowTy.getSizeInBits(); - - switch (MI.getOpcode()) { - default: - return UnableToLegalize; - case TargetOpcode::G_IMPLICIT_DEF: { - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - - // If SizeOp0 is not an exact multiple of NarrowSize, emit - // G_ANYEXT(G_IMPLICIT_DEF). Cast result to vector if needed. 
- // FIXME: Although this would also be legal for the general case, it causes - // a lot of regressions in the emitted code (superfluous COPYs, artifact - // combines not being hit). This seems to be a problem related to the - // artifact combiner. - if (SizeOp0 % NarrowSize != 0) { - LLT ImplicitTy = NarrowTy; - if (DstTy.isVector()) - ImplicitTy = LLT::vector(DstTy.getNumElements(), ImplicitTy); - - Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0); - MIRBuilder.buildAnyExt(DstReg, ImplicitReg); - - MI.eraseFromParent(); - return Legalized; - } - - int NumParts = SizeOp0 / NarrowSize; - - SmallVector<Register, 2> DstRegs; - for (int i = 0; i < NumParts; ++i) - DstRegs.push_back(MIRBuilder.buildUndef(NarrowTy).getReg(0)); - - if (DstTy.isVector()) - MIRBuilder.buildBuildVector(DstReg, DstRegs); - else - MIRBuilder.buildMerge(DstReg, DstRegs); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_CONSTANT: { - LLT Ty = MRI.getType(MI.getOperand(0).getReg()); - const APInt &Val = MI.getOperand(1).getCImm()->getValue(); - unsigned TotalSize = Ty.getSizeInBits(); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - int NumParts = TotalSize / NarrowSize; - - SmallVector<Register, 4> PartRegs; - for (int I = 0; I != NumParts; ++I) { - unsigned Offset = I * NarrowSize; - auto K = MIRBuilder.buildConstant(NarrowTy, - Val.lshr(Offset).trunc(NarrowSize)); - PartRegs.push_back(K.getReg(0)); - } - - LLT LeftoverTy; - unsigned LeftoverBits = TotalSize - NumParts * NarrowSize; - SmallVector<Register, 1> LeftoverRegs; - if (LeftoverBits != 0) { - LeftoverTy = LLT::scalar(LeftoverBits); - auto K = MIRBuilder.buildConstant( - LeftoverTy, - Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits)); - LeftoverRegs.push_back(K.getReg(0)); - } - - insertParts(MI.getOperand(0).getReg(), - Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs); - - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_SEXT: - case TargetOpcode::G_ZEXT: - case TargetOpcode::G_ANYEXT: - return narrowScalarExt(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_TRUNC: { - if (TypeIdx != 1) - return UnableToLegalize; - - uint64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); - if (NarrowTy.getSizeInBits() * 2 != SizeOp1) { - LLVM_DEBUG(dbgs() << "Can't narrow trunc to type " << NarrowTy << "\n"); - return UnableToLegalize; - } - - auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1)); - MIRBuilder.buildCopy(MI.getOperand(0), Unmerge.getReg(0)); - MI.eraseFromParent(); - return Legalized; - } - - case TargetOpcode::G_FREEZE: - return reduceOperationWidth(MI, TypeIdx, NarrowTy); - - case TargetOpcode::G_ADD: { - // FIXME: add support for when SizeOp0 isn't an exact multiple of - // NarrowSize. - if (SizeOp0 % NarrowSize != 0) - return UnableToLegalize; - // Expand in terms of carry-setting/consuming G_ADDE instructions. 
- int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); - - SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; - extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); - extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); - - Register CarryIn; - for (int i = 0; i < NumParts; ++i) { - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); - Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); - - if (i == 0) - MIRBuilder.buildUAddo(DstReg, CarryOut, Src1Regs[i], Src2Regs[i]); - else { - MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i], - Src2Regs[i], CarryIn); - } - - DstRegs.push_back(DstReg); - CarryIn = CarryOut; - } - Register DstReg = MI.getOperand(0).getReg(); - if(MRI.getType(DstReg).isVector()) - MIRBuilder.buildBuildVector(DstReg, DstRegs); - else - MIRBuilder.buildMerge(DstReg, DstRegs); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_SUB: { - // FIXME: add support for when SizeOp0 isn't an exact multiple of - // NarrowSize. - if (SizeOp0 % NarrowSize != 0) - return UnableToLegalize; - - int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); - - SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; - extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); - extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); - - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); - Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); - MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut}, - {Src1Regs[0], Src2Regs[0]}); - DstRegs.push_back(DstReg); - Register BorrowIn = BorrowOut; - for (int i = 1; i < NumParts; ++i) { - DstReg = MRI.createGenericVirtualRegister(NarrowTy); - BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); - - MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut}, - {Src1Regs[i], Src2Regs[i], BorrowIn}); - - DstRegs.push_back(DstReg); - BorrowIn = BorrowOut; - } - MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_MUL: - case TargetOpcode::G_UMULH: - return narrowScalarMul(MI, NarrowTy); - case TargetOpcode::G_EXTRACT: - return narrowScalarExtract(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_INSERT: - return narrowScalarInsert(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_LOAD: { + } + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, + unsigned TypeIdx, + LLT NarrowTy) { + uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_IMPLICIT_DEF: { + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + + // If SizeOp0 is not an exact multiple of NarrowSize, emit + // G_ANYEXT(G_IMPLICIT_DEF). Cast result to vector if needed. + // FIXME: Although this would also be legal for the general case, it causes + // a lot of regressions in the emitted code (superfluous COPYs, artifact + // combines not being hit). This seems to be a problem related to the + // artifact combiner. 
+ if (SizeOp0 % NarrowSize != 0) { + LLT ImplicitTy = NarrowTy; + if (DstTy.isVector()) + ImplicitTy = LLT::vector(DstTy.getNumElements(), ImplicitTy); + + Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0); + MIRBuilder.buildAnyExt(DstReg, ImplicitReg); + + MI.eraseFromParent(); + return Legalized; + } + + int NumParts = SizeOp0 / NarrowSize; + + SmallVector<Register, 2> DstRegs; + for (int i = 0; i < NumParts; ++i) + DstRegs.push_back(MIRBuilder.buildUndef(NarrowTy).getReg(0)); + + if (DstTy.isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_CONSTANT: { + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + const APInt &Val = MI.getOperand(1).getCImm()->getValue(); + unsigned TotalSize = Ty.getSizeInBits(); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + int NumParts = TotalSize / NarrowSize; + + SmallVector<Register, 4> PartRegs; + for (int I = 0; I != NumParts; ++I) { + unsigned Offset = I * NarrowSize; + auto K = MIRBuilder.buildConstant(NarrowTy, + Val.lshr(Offset).trunc(NarrowSize)); + PartRegs.push_back(K.getReg(0)); + } + + LLT LeftoverTy; + unsigned LeftoverBits = TotalSize - NumParts * NarrowSize; + SmallVector<Register, 1> LeftoverRegs; + if (LeftoverBits != 0) { + LeftoverTy = LLT::scalar(LeftoverBits); + auto K = MIRBuilder.buildConstant( + LeftoverTy, + Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits)); + LeftoverRegs.push_back(K.getReg(0)); + } + + insertParts(MI.getOperand(0).getReg(), + Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs); + + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_ANYEXT: + return narrowScalarExt(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_TRUNC: { + if (TypeIdx != 1) + return UnableToLegalize; + + uint64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + if (NarrowTy.getSizeInBits() * 2 != SizeOp1) { + LLVM_DEBUG(dbgs() << "Can't narrow trunc to type " << NarrowTy << "\n"); + return UnableToLegalize; + } + + auto Unmerge = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1)); + MIRBuilder.buildCopy(MI.getOperand(0), Unmerge.getReg(0)); + MI.eraseFromParent(); + return Legalized; + } + + case TargetOpcode::G_FREEZE: + return reduceOperationWidth(MI, TypeIdx, NarrowTy); + + case TargetOpcode::G_ADD: { + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + // Expand in terms of carry-setting/consuming G_ADDE instructions. 
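A standalone model of the carry chain the loop below emits: the first limb uses G_UADDO to produce the carry, later limbs use G_UADDE to consume and propagate it, and the final carry-out is discarded, which matches the wrapping semantics of G_ADD. In plain C++, for a hypothetical s64 add narrowed to two s32 limbs:

#include <cstdint>

uint64_t add64ViaTwo32BitLimbs(uint64_t A, uint64_t B) {
  uint32_t A0 = uint32_t(A), A1 = uint32_t(A >> 32);
  uint32_t B0 = uint32_t(B), B1 = uint32_t(B >> 32);
  uint32_t Lo = A0 + B0;            // G_UADDO: low limb, sets the carry
  uint32_t Carry = Lo < A0 ? 1 : 0; // unsigned overflow check
  uint32_t Hi = A1 + B1 + Carry;    // G_UADDE: high limb, consumes the carry
  return (uint64_t(Hi) << 32) | Lo; // G_MERGE_VALUES of the two limbs
}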
+ int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); + extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); + + Register CarryIn; + for (int i = 0; i < NumParts; ++i) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + + if (i == 0) + MIRBuilder.buildUAddo(DstReg, CarryOut, Src1Regs[i], Src2Regs[i]); + else { + MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i], + Src2Regs[i], CarryIn); + } + + DstRegs.push_back(DstReg); + CarryIn = CarryOut; + } + Register DstReg = MI.getOperand(0).getReg(); + if(MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SUB: { + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); + extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); + + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut}, + {Src1Regs[0], Src2Regs[0]}); + DstRegs.push_back(DstReg); + Register BorrowIn = BorrowOut; + for (int i = 1; i < NumParts; ++i) { + DstReg = MRI.createGenericVirtualRegister(NarrowTy); + BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); + + MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut}, + {Src1Regs[i], Src2Regs[i], BorrowIn}); + + DstRegs.push_back(DstReg); + BorrowIn = BorrowOut; + } + MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_MUL: + case TargetOpcode::G_UMULH: + return narrowScalarMul(MI, NarrowTy); + case TargetOpcode::G_EXTRACT: + return narrowScalarExtract(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_INSERT: + return narrowScalarInsert(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_LOAD: { + auto &MMO = **MI.memoperands_begin(); + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + if (8 * MMO.getSize() != DstTy.getSizeInBits()) { + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildLoad(TmpReg, MI.getOperand(1), MMO); + MIRBuilder.buildAnyExt(DstReg, TmpReg); + MI.eraseFromParent(); + return Legalized; + } + + return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); + } + case TargetOpcode::G_ZEXTLOAD: + case TargetOpcode::G_SEXTLOAD: { + bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD; + Register DstReg = MI.getOperand(0).getReg(); + Register PtrReg = MI.getOperand(1).getReg(); + + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); auto &MMO = **MI.memoperands_begin(); - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - if (DstTy.isVector()) - return UnableToLegalize; - - if (8 * MMO.getSize() != DstTy.getSizeInBits()) { - Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.buildLoad(TmpReg, MI.getOperand(1), MMO); - MIRBuilder.buildAnyExt(DstReg, TmpReg); - 
MI.eraseFromParent(); - return Legalized; - } - - return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); - } - case TargetOpcode::G_ZEXTLOAD: - case TargetOpcode::G_SEXTLOAD: { - bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD; - Register DstReg = MI.getOperand(0).getReg(); - Register PtrReg = MI.getOperand(1).getReg(); - - Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); - auto &MMO = **MI.memoperands_begin(); unsigned MemSize = MMO.getSizeInBits(); if (MemSize == NarrowSize) { - MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); + MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); } else if (MemSize < NarrowSize) { - MIRBuilder.buildLoadInstr(MI.getOpcode(), TmpReg, PtrReg, MMO); + MIRBuilder.buildLoadInstr(MI.getOpcode(), TmpReg, PtrReg, MMO); } else if (MemSize > NarrowSize) { // FIXME: Need to split the load. return UnableToLegalize; - } - - if (ZExt) - MIRBuilder.buildZExt(DstReg, TmpReg); - else - MIRBuilder.buildSExt(DstReg, TmpReg); - - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_STORE: { - const auto &MMO = **MI.memoperands_begin(); - - Register SrcReg = MI.getOperand(0).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - if (SrcTy.isVector()) - return UnableToLegalize; - - int NumParts = SizeOp0 / NarrowSize; - unsigned HandledSize = NumParts * NarrowTy.getSizeInBits(); - unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize; - if (SrcTy.isVector() && LeftoverBits != 0) - return UnableToLegalize; - - if (8 * MMO.getSize() != SrcTy.getSizeInBits()) { - Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); - auto &MMO = **MI.memoperands_begin(); - MIRBuilder.buildTrunc(TmpReg, SrcReg); - MIRBuilder.buildStore(TmpReg, MI.getOperand(1), MMO); - MI.eraseFromParent(); - return Legalized; - } - - return reduceLoadStoreWidth(MI, 0, NarrowTy); - } - case TargetOpcode::G_SELECT: - return narrowScalarSelect(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_AND: - case TargetOpcode::G_OR: - case TargetOpcode::G_XOR: { - // Legalize bitwise operation: - // A = BinOp<Ty> B, C - // into: - // B1, ..., BN = G_UNMERGE_VALUES B - // C1, ..., CN = G_UNMERGE_VALUES C - // A1 = BinOp<Ty/N> B1, C2 - // ... 
- // AN = BinOp<Ty/N> BN, CN - // A = G_MERGE_VALUES A1, ..., AN - return narrowScalarBasic(MI, TypeIdx, NarrowTy); - } - case TargetOpcode::G_SHL: - case TargetOpcode::G_LSHR: - case TargetOpcode::G_ASHR: - return narrowScalarShift(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_CTLZ: - case TargetOpcode::G_CTLZ_ZERO_UNDEF: - case TargetOpcode::G_CTTZ: - case TargetOpcode::G_CTTZ_ZERO_UNDEF: - case TargetOpcode::G_CTPOP: - if (TypeIdx == 1) - switch (MI.getOpcode()) { - case TargetOpcode::G_CTLZ: - case TargetOpcode::G_CTLZ_ZERO_UNDEF: - return narrowScalarCTLZ(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_CTTZ: - case TargetOpcode::G_CTTZ_ZERO_UNDEF: - return narrowScalarCTTZ(MI, TypeIdx, NarrowTy); - case TargetOpcode::G_CTPOP: - return narrowScalarCTPOP(MI, TypeIdx, NarrowTy); - default: - return UnableToLegalize; - } - - Observer.changingInstr(MI); - narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_INTTOPTR: - if (TypeIdx != 1) - return UnableToLegalize; - - Observer.changingInstr(MI); - narrowScalarSrc(MI, NarrowTy, 1); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_PTRTOINT: - if (TypeIdx != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_PHI: { + } + + if (ZExt) + MIRBuilder.buildZExt(DstReg, TmpReg); + else + MIRBuilder.buildSExt(DstReg, TmpReg); + + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_STORE: { + const auto &MMO = **MI.memoperands_begin(); + + Register SrcReg = MI.getOperand(0).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + if (SrcTy.isVector()) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowSize; + unsigned HandledSize = NumParts * NarrowTy.getSizeInBits(); + unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize; + if (SrcTy.isVector() && LeftoverBits != 0) + return UnableToLegalize; + + if (8 * MMO.getSize() != SrcTy.getSizeInBits()) { + Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + MIRBuilder.buildTrunc(TmpReg, SrcReg); + MIRBuilder.buildStore(TmpReg, MI.getOperand(1), MMO); + MI.eraseFromParent(); + return Legalized; + } + + return reduceLoadStoreWidth(MI, 0, NarrowTy); + } + case TargetOpcode::G_SELECT: + return narrowScalarSelect(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_AND: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: { + // Legalize bitwise operation: + // A = BinOp<Ty> B, C + // into: + // B1, ..., BN = G_UNMERGE_VALUES B + // C1, ..., CN = G_UNMERGE_VALUES C + // A1 = BinOp<Ty/N> B1, C2 + // ... 
+ // AN = BinOp<Ty/N> BN, CN + // A = G_MERGE_VALUES A1, ..., AN + return narrowScalarBasic(MI, TypeIdx, NarrowTy); + } + case TargetOpcode::G_SHL: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_ASHR: + return narrowScalarShift(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + case TargetOpcode::G_CTPOP: + if (TypeIdx == 1) + switch (MI.getOpcode()) { + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + return narrowScalarCTLZ(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + return narrowScalarCTTZ(MI, TypeIdx, NarrowTy); + case TargetOpcode::G_CTPOP: + return narrowScalarCTPOP(MI, TypeIdx, NarrowTy); + default: + return UnableToLegalize; + } + + Observer.changingInstr(MI); + narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_INTTOPTR: + if (TypeIdx != 1) + return UnableToLegalize; + + Observer.changingInstr(MI); + narrowScalarSrc(MI, NarrowTy, 1); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PTRTOINT: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PHI: { // FIXME: add support for when SizeOp0 isn't an exact multiple of // NarrowSize. if (SizeOp0 % NarrowSize != 0) return UnableToLegalize; - unsigned NumParts = SizeOp0 / NarrowSize; - SmallVector<Register, 2> DstRegs(NumParts); - SmallVector<SmallVector<Register, 2>, 2> SrcRegs(MI.getNumOperands() / 2); - Observer.changingInstr(MI); - for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { - MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB(); - MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); - extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts, - SrcRegs[i / 2]); - } - MachineBasicBlock &MBB = *MI.getParent(); - MIRBuilder.setInsertPt(MBB, MI); - for (unsigned i = 0; i < NumParts; ++i) { - DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy); - MachineInstrBuilder MIB = - MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]); - for (unsigned j = 1; j < MI.getNumOperands(); j += 2) - MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1)); - } - MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI()); - MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); - Observer.changedInstr(MI); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_EXTRACT_VECTOR_ELT: - case TargetOpcode::G_INSERT_VECTOR_ELT: { - if (TypeIdx != 2) - return UnableToLegalize; - - int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 
2 : 3; - Observer.changingInstr(MI); - narrowScalarSrc(MI, NarrowTy, OpIdx); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_ICMP: { - uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); - if (NarrowSize * 2 != SrcSize) - return UnableToLegalize; - - Observer.changingInstr(MI); - Register LHSL = MRI.createGenericVirtualRegister(NarrowTy); - Register LHSH = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2)); - - Register RHSL = MRI.createGenericVirtualRegister(NarrowTy); - Register RHSH = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3)); - - CmpInst::Predicate Pred = - static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); - LLT ResTy = MRI.getType(MI.getOperand(0).getReg()); - - if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { - MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL); - MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH); - MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH); - MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0); - MIRBuilder.buildICmp(Pred, MI.getOperand(0), Or, Zero); - } else { - MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH); - MachineInstrBuilder CmpHEQ = - MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH); - MachineInstrBuilder CmpLU = MIRBuilder.buildICmp( - ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL); - MIRBuilder.buildSelect(MI.getOperand(0), CmpHEQ, CmpLU, CmpH); - } - Observer.changedInstr(MI); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_SEXT_INREG: { - if (TypeIdx != 0) - return UnableToLegalize; - - int64_t SizeInBits = MI.getOperand(2).getImm(); - - // So long as the new type has more bits than the bits we're extending we - // don't need to break it apart. - if (NarrowTy.getScalarSizeInBits() >= SizeInBits) { - Observer.changingInstr(MI); - // We don't lose any non-extension bits by truncating the src and - // sign-extending the dst. - MachineOperand &MO1 = MI.getOperand(1); - auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1); - MO1.setReg(TruncMIB.getReg(0)); - - MachineOperand &MO2 = MI.getOperand(0); - Register DstExt = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - MIRBuilder.buildSExt(MO2, DstExt); - MO2.setReg(DstExt); - Observer.changedInstr(MI); - return Legalized; - } - - // Break it apart. Components below the extension point are unmodified. The - // component containing the extension point becomes a narrower SEXT_INREG. - // Components above it are ashr'd from the component containing the - // extension point. - if (SizeOp0 % NarrowSize != 0) - return UnableToLegalize; - int NumParts = SizeOp0 / NarrowSize; - - // List the registers where the destination will be scattered. - SmallVector<Register, 2> DstRegs; - // List the registers where the source will be split. - SmallVector<Register, 2> SrcRegs; - - // Create all the temporary registers. - for (int i = 0; i < NumParts; ++i) { - Register SrcReg = MRI.createGenericVirtualRegister(NarrowTy); - - SrcRegs.push_back(SrcReg); - } - - // Explode the big arguments into smaller chunks. 
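// [Editorial aside: a hedged C++ sketch of the per-part arithmetic the
// unmerge and loop below implement, for one concrete case: an s96 value in
// three s32 parts, sign-extending from bit 40 (so 40 % 32 = 8 bits remain
// in the part containing the extension point). Names and the fixed sizes
// are illustrative only.]
#include <cstdint>

void sextInRegAt40(uint32_t P[3]) {
  // Part 0 is fully below the extension point and is kept as-is.
  int32_t Mid = (int32_t)(P[1] << 24) >> 24; // narrower G_SEXT_INREG, 8 bits
  P[1] = (uint32_t)Mid;
  P[2] = (uint32_t)(Mid >> 31);              // G_ASHR by NarrowSize - 1: sign fill
}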
- MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1)); - - Register AshrCstReg = - MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1) - .getReg(0); - Register FullExtensionReg = 0; - Register PartialExtensionReg = 0; - - // Do the operation on each small part. - for (int i = 0; i < NumParts; ++i) { - if ((i + 1) * NarrowTy.getScalarSizeInBits() < SizeInBits) - DstRegs.push_back(SrcRegs[i]); - else if (i * NarrowTy.getScalarSizeInBits() > SizeInBits) { - assert(PartialExtensionReg && - "Expected to visit partial extension before full"); - if (FullExtensionReg) { - DstRegs.push_back(FullExtensionReg); - continue; - } - DstRegs.push_back( - MIRBuilder.buildAShr(NarrowTy, PartialExtensionReg, AshrCstReg) - .getReg(0)); - FullExtensionReg = DstRegs.back(); - } else { - DstRegs.push_back( - MIRBuilder - .buildInstr( - TargetOpcode::G_SEXT_INREG, {NarrowTy}, - {SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()}) - .getReg(0)); - PartialExtensionReg = DstRegs.back(); - } - } - - // Gather the destination registers into the final destination. - Register DstReg = MI.getOperand(0).getReg(); - MIRBuilder.buildMerge(DstReg, DstRegs); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_BSWAP: - case TargetOpcode::G_BITREVERSE: { - if (SizeOp0 % NarrowSize != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - SmallVector<Register, 2> SrcRegs, DstRegs; - unsigned NumParts = SizeOp0 / NarrowSize; - extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); - - for (unsigned i = 0; i < NumParts; ++i) { - auto DstPart = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, - {SrcRegs[NumParts - 1 - i]}); - DstRegs.push_back(DstPart.getReg(0)); - } - - MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); - - Observer.changedInstr(MI); - MI.eraseFromParent(); - return Legalized; - } + unsigned NumParts = SizeOp0 / NarrowSize; + SmallVector<Register, 2> DstRegs(NumParts); + SmallVector<SmallVector<Register, 2>, 2> SrcRegs(MI.getNumOperands() / 2); + Observer.changingInstr(MI); + for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts, + SrcRegs[i / 2]); + } + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, MI); + for (unsigned i = 0; i < NumParts; ++i) { + DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy); + MachineInstrBuilder MIB = + MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]); + for (unsigned j = 1; j < MI.getNumOperands(); j += 2) + MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1)); + } + MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI()); + MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); + Observer.changedInstr(MI); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_EXTRACT_VECTOR_ELT: + case TargetOpcode::G_INSERT_VECTOR_ELT: { + if (TypeIdx != 2) + return UnableToLegalize; + + int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 
2 : 3; + Observer.changingInstr(MI); + narrowScalarSrc(MI, NarrowTy, OpIdx); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_ICMP: { + uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); + if (NarrowSize * 2 != SrcSize) + return UnableToLegalize; + + Observer.changingInstr(MI); + Register LHSL = MRI.createGenericVirtualRegister(NarrowTy); + Register LHSH = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2)); + + Register RHSL = MRI.createGenericVirtualRegister(NarrowTy); + Register RHSH = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3)); + + CmpInst::Predicate Pred = + static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); + LLT ResTy = MRI.getType(MI.getOperand(0).getReg()); + + if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { + MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL); + MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH); + MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH); + MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0); + MIRBuilder.buildICmp(Pred, MI.getOperand(0), Or, Zero); + } else { + MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, ResTy, LHSH, RHSH); + MachineInstrBuilder CmpHEQ = + MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, ResTy, LHSH, RHSH); + MachineInstrBuilder CmpLU = MIRBuilder.buildICmp( + ICmpInst::getUnsignedPredicate(Pred), ResTy, LHSL, RHSL); + MIRBuilder.buildSelect(MI.getOperand(0), CmpHEQ, CmpLU, CmpH); + } + Observer.changedInstr(MI); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SEXT_INREG: { + if (TypeIdx != 0) + return UnableToLegalize; + + int64_t SizeInBits = MI.getOperand(2).getImm(); + + // So long as the new type has more bits than the bits we're extending we + // don't need to break it apart. + if (NarrowTy.getScalarSizeInBits() >= SizeInBits) { + Observer.changingInstr(MI); + // We don't lose any non-extension bits by truncating the src and + // sign-extending the dst. + MachineOperand &MO1 = MI.getOperand(1); + auto TruncMIB = MIRBuilder.buildTrunc(NarrowTy, MO1); + MO1.setReg(TruncMIB.getReg(0)); + + MachineOperand &MO2 = MI.getOperand(0); + Register DstExt = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildSExt(MO2, DstExt); + MO2.setReg(DstExt); + Observer.changedInstr(MI); + return Legalized; + } + + // Break it apart. Components below the extension point are unmodified. The + // component containing the extension point becomes a narrower SEXT_INREG. + // Components above it are ashr'd from the component containing the + // extension point. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + int NumParts = SizeOp0 / NarrowSize; + + // List the registers where the destination will be scattered. + SmallVector<Register, 2> DstRegs; + // List the registers where the source will be split. + SmallVector<Register, 2> SrcRegs; + + // Create all the temporary registers. + for (int i = 0; i < NumParts; ++i) { + Register SrcReg = MRI.createGenericVirtualRegister(NarrowTy); + + SrcRegs.push_back(SrcReg); + } + + // Explode the big arguments into smaller chunks. 
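// [Editorial aside: what the narrowed G_ICMP expansion shown earlier in
// this hunk computes, sketched in C++ for a signed 64-bit '<' done in
// 32-bit halves. For EQ/NE the code instead ORs the XORs of both halves
// and compares against zero. slt64ViaHalves is an illustrative name.]
#include <cstdint>

bool slt64ViaHalves(int32_t LHSH, uint32_t LHSL, int32_t RHSH, uint32_t RHSL) {
  bool CmpH   = LHSH < RHSH;    // original signed predicate on high halves
  bool CmpHEQ = LHSH == RHSH;   // G_ICMP eq on the high halves
  bool CmpLU  = LHSL < RHSL;    // unsigned predicate on the low halves
  return CmpHEQ ? CmpLU : CmpH; // the final G_SELECT
}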
+ MIRBuilder.buildUnmerge(SrcRegs, MI.getOperand(1)); + + Register AshrCstReg = + MIRBuilder.buildConstant(NarrowTy, NarrowTy.getScalarSizeInBits() - 1) + .getReg(0); + Register FullExtensionReg = 0; + Register PartialExtensionReg = 0; + + // Do the operation on each small part. + for (int i = 0; i < NumParts; ++i) { + if ((i + 1) * NarrowTy.getScalarSizeInBits() < SizeInBits) + DstRegs.push_back(SrcRegs[i]); + else if (i * NarrowTy.getScalarSizeInBits() > SizeInBits) { + assert(PartialExtensionReg && + "Expected to visit partial extension before full"); + if (FullExtensionReg) { + DstRegs.push_back(FullExtensionReg); + continue; + } + DstRegs.push_back( + MIRBuilder.buildAShr(NarrowTy, PartialExtensionReg, AshrCstReg) + .getReg(0)); + FullExtensionReg = DstRegs.back(); + } else { + DstRegs.push_back( + MIRBuilder + .buildInstr( + TargetOpcode::G_SEXT_INREG, {NarrowTy}, + {SrcRegs[i], SizeInBits % NarrowTy.getScalarSizeInBits()}) + .getReg(0)); + PartialExtensionReg = DstRegs.back(); + } + } + + // Gather the destination registers into the final destination. + Register DstReg = MI.getOperand(0).getReg(); + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_BSWAP: + case TargetOpcode::G_BITREVERSE: { + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + SmallVector<Register, 2> SrcRegs, DstRegs; + unsigned NumParts = SizeOp0 / NarrowSize; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); + + for (unsigned i = 0; i < NumParts; ++i) { + auto DstPart = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, + {SrcRegs[NumParts - 1 - i]}); + DstRegs.push_back(DstPart.getReg(0)); + } + + MIRBuilder.buildMerge(MI.getOperand(0), DstRegs); + + Observer.changedInstr(MI); + MI.eraseFromParent(); + return Legalized; + } case TargetOpcode::G_PTR_ADD: - case TargetOpcode::G_PTRMASK: { - if (TypeIdx != 1) - return UnableToLegalize; - Observer.changingInstr(MI); - narrowScalarSrc(MI, NarrowTy, 2); - Observer.changedInstr(MI); - return Legalized; - } + case TargetOpcode::G_PTRMASK: { + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + narrowScalarSrc(MI, NarrowTy, 2); + Observer.changedInstr(MI); + return Legalized; + } case TargetOpcode::G_FPTOUI: case TargetOpcode::G_FPTOSI: return narrowScalarFPTOI(MI, TypeIdx, NarrowTy); @@ -1268,234 +1268,234 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, Observer.changedInstr(MI); return Legalized; } -} - -Register LegalizerHelper::coerceToScalar(Register Val) { - LLT Ty = MRI.getType(Val); - if (Ty.isScalar()) - return Val; - - const DataLayout &DL = MIRBuilder.getDataLayout(); - LLT NewTy = LLT::scalar(Ty.getSizeInBits()); - if (Ty.isPointer()) { - if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace())) - return Register(); - return MIRBuilder.buildPtrToInt(NewTy, Val).getReg(0); - } - - Register NewVal = Val; - - assert(Ty.isVector()); - LLT EltTy = Ty.getElementType(); - if (EltTy.isPointer()) - NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0); - return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0); -} - -void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy, - unsigned OpIdx, unsigned ExtOpcode) { - MachineOperand &MO = MI.getOperand(OpIdx); - auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO}); - MO.setReg(ExtB.getReg(0)); -} - -void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, - unsigned OpIdx) { - MachineOperand &MO = 
MI.getOperand(OpIdx); - auto ExtB = MIRBuilder.buildTrunc(NarrowTy, MO); - MO.setReg(ExtB.getReg(0)); -} - -void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy, - unsigned OpIdx, unsigned TruncOpcode) { - MachineOperand &MO = MI.getOperand(OpIdx); - Register DstExt = MRI.createGenericVirtualRegister(WideTy); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - MIRBuilder.buildInstr(TruncOpcode, {MO}, {DstExt}); - MO.setReg(DstExt); -} - -void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy, - unsigned OpIdx, unsigned ExtOpcode) { - MachineOperand &MO = MI.getOperand(OpIdx); - Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - MIRBuilder.buildInstr(ExtOpcode, {MO}, {DstTrunc}); - MO.setReg(DstTrunc); -} - -void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy, - unsigned OpIdx) { - MachineOperand &MO = MI.getOperand(OpIdx); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); +} + +Register LegalizerHelper::coerceToScalar(Register Val) { + LLT Ty = MRI.getType(Val); + if (Ty.isScalar()) + return Val; + + const DataLayout &DL = MIRBuilder.getDataLayout(); + LLT NewTy = LLT::scalar(Ty.getSizeInBits()); + if (Ty.isPointer()) { + if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace())) + return Register(); + return MIRBuilder.buildPtrToInt(NewTy, Val).getReg(0); + } + + Register NewVal = Val; + + assert(Ty.isVector()); + LLT EltTy = Ty.getElementType(); + if (EltTy.isPointer()) + NewVal = MIRBuilder.buildPtrToInt(NewTy, NewVal).getReg(0); + return MIRBuilder.buildBitcast(NewTy, NewVal).getReg(0); +} + +void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy, + unsigned OpIdx, unsigned ExtOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO}); + MO.setReg(ExtB.getReg(0)); +} + +void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + auto ExtB = MIRBuilder.buildTrunc(NarrowTy, MO); + MO.setReg(ExtB.getReg(0)); +} + +void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy, + unsigned OpIdx, unsigned TruncOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register DstExt = MRI.createGenericVirtualRegister(WideTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildInstr(TruncOpcode, {MO}, {DstExt}); + MO.setReg(DstExt); +} + +void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy, + unsigned OpIdx, unsigned ExtOpcode) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildInstr(ExtOpcode, {MO}, {DstTrunc}); + MO.setReg(DstTrunc); +} + +void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); MO.setReg(widenWithUnmerge(WideTy, MO.getReg())); -} - -void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, - unsigned OpIdx) { - MachineOperand &MO = MI.getOperand(OpIdx); - - LLT OldTy = MRI.getType(MO.getReg()); - unsigned OldElts = OldTy.getNumElements(); - unsigned NewElts = MoreTy.getNumElements(); - - unsigned NumParts = NewElts / OldElts; - - // Use concat_vectors if the result 
is a multiple of the number of elements. - if (NumParts * OldElts == NewElts) { - SmallVector<Register, 8> Parts; - Parts.push_back(MO.getReg()); - - Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0); - for (unsigned I = 1; I != NumParts; ++I) - Parts.push_back(ImpDef); - - auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts); - MO.setReg(Concat.getReg(0)); - return; - } - - Register MoreReg = MRI.createGenericVirtualRegister(MoreTy); - Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0); - MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0); - MO.setReg(MoreReg); -} - -void LegalizerHelper::bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx) { - MachineOperand &Op = MI.getOperand(OpIdx); - Op.setReg(MIRBuilder.buildBitcast(CastTy, Op).getReg(0)); -} - -void LegalizerHelper::bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx) { - MachineOperand &MO = MI.getOperand(OpIdx); - Register CastDst = MRI.createGenericVirtualRegister(CastTy); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - MIRBuilder.buildBitcast(MO, CastDst); - MO.setReg(CastDst); -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, - LLT WideTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - if (DstTy.isVector()) - return UnableToLegalize; - - Register Src1 = MI.getOperand(1).getReg(); - LLT SrcTy = MRI.getType(Src1); - const int DstSize = DstTy.getSizeInBits(); - const int SrcSize = SrcTy.getSizeInBits(); - const int WideSize = WideTy.getSizeInBits(); - const int NumMerge = (DstSize + WideSize - 1) / WideSize; - - unsigned NumOps = MI.getNumOperands(); - unsigned NumSrc = MI.getNumOperands() - 1; - unsigned PartSize = DstTy.getSizeInBits() / NumSrc; - - if (WideSize >= DstSize) { - // Directly pack the bits in the target type. - Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0); - - for (unsigned I = 2; I != NumOps; ++I) { - const unsigned Offset = (I - 1) * PartSize; - - Register SrcReg = MI.getOperand(I).getReg(); - assert(MRI.getType(SrcReg) == LLT::scalar(PartSize)); - - auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); - - Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg : - MRI.createGenericVirtualRegister(WideTy); - - auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); - auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); - MIRBuilder.buildOr(NextResult, ResultReg, Shl); - ResultReg = NextResult; - } - - if (WideSize > DstSize) - MIRBuilder.buildTrunc(DstReg, ResultReg); - else if (DstTy.isPointer()) - MIRBuilder.buildIntToPtr(DstReg, ResultReg); - - MI.eraseFromParent(); - return Legalized; - } - - // Unmerge the original values to the GCD type, and recombine to the next - // multiple greater than the original type. 
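// [Editorial aside: the "directly pack the bits" path of
// widenScalarMergeValues above, restated as runnable C++ for four s16
// sources packed into an s64 destination; packParts is an illustrative
// name, not part of the code being diffed.]
#include <cstdint>

uint64_t packParts(const uint16_t Parts[4]) {
  uint64_t Result = Parts[0];                 // G_ZEXT of source 0
  for (unsigned I = 1; I != 4; ++I)
    Result |= (uint64_t)Parts[I] << (16 * I); // G_ZEXT, G_SHL by offset, G_OR
  return Result;
}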
- // - // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6 - // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0 - // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1 - // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2 - // %10:_(s6) = G_MERGE_VALUES %4, %5, %6 - // %11:_(s6) = G_MERGE_VALUES %7, %8, %9 - // %12:_(s12) = G_MERGE_VALUES %10, %11 - // - // Padding with undef if necessary: - // - // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6 - // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0 - // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1 - // %7:_(s2) = G_IMPLICIT_DEF - // %8:_(s6) = G_MERGE_VALUES %3, %4, %5 - // %9:_(s6) = G_MERGE_VALUES %6, %7, %7 - // %10:_(s12) = G_MERGE_VALUES %8, %9 - - const int GCD = greatestCommonDivisor(SrcSize, WideSize); - LLT GCDTy = LLT::scalar(GCD); - - SmallVector<Register, 8> Parts; - SmallVector<Register, 8> NewMergeRegs; - SmallVector<Register, 8> Unmerges; - LLT WideDstTy = LLT::scalar(NumMerge * WideSize); - - // Decompose the original operands if they don't evenly divide. - for (int I = 1, E = MI.getNumOperands(); I != E; ++I) { - Register SrcReg = MI.getOperand(I).getReg(); - if (GCD == SrcSize) { - Unmerges.push_back(SrcReg); - } else { - auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); - for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J) - Unmerges.push_back(Unmerge.getReg(J)); - } - } - - // Pad with undef to the next size that is a multiple of the requested size. - if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) { - Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0); - for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I) - Unmerges.push_back(UndefReg); - } - - const int PartsPerGCD = WideSize / GCD; - - // Build merges of each piece. - ArrayRef<Register> Slicer(Unmerges); - for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) { - auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD)); - NewMergeRegs.push_back(Merge.getReg(0)); - } - - // A truncate may be necessary if the requested type doesn't evenly divide the - // original result type. - if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) { - MIRBuilder.buildMerge(DstReg, NewMergeRegs); - } else { - auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs); - MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0)); - } - - MI.eraseFromParent(); - return Legalized; -} - +} + +void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, + unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + + LLT OldTy = MRI.getType(MO.getReg()); + unsigned OldElts = OldTy.getNumElements(); + unsigned NewElts = MoreTy.getNumElements(); + + unsigned NumParts = NewElts / OldElts; + + // Use concat_vectors if the result is a multiple of the number of elements. 
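// [Editorial aside: the concat_vectors case below, as a tiny C++ analogy:
// a 2-element vector grows to 4 elements by appending filler. Zeros stand
// in for G_IMPLICIT_DEF, which has no C++ equivalent.]
#include <array>

std::array<int, 4> padTo4(const std::array<int, 2> &V) {
  return {V[0], V[1], /*undef*/ 0, /*undef*/ 0};
}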
+ if (NumParts * OldElts == NewElts) { + SmallVector<Register, 8> Parts; + Parts.push_back(MO.getReg()); + + Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0); + for (unsigned I = 1; I != NumParts; ++I) + Parts.push_back(ImpDef); + + auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts); + MO.setReg(Concat.getReg(0)); + return; + } + + Register MoreReg = MRI.createGenericVirtualRegister(MoreTy); + Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0); + MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0); + MO.setReg(MoreReg); +} + +void LegalizerHelper::bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx) { + MachineOperand &Op = MI.getOperand(OpIdx); + Op.setReg(MIRBuilder.buildBitcast(CastTy, Op).getReg(0)); +} + +void LegalizerHelper::bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx) { + MachineOperand &MO = MI.getOperand(OpIdx); + Register CastDst = MRI.createGenericVirtualRegister(CastTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildBitcast(MO, CastDst); + MO.setReg(CastDst); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + Register Src1 = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(Src1); + const int DstSize = DstTy.getSizeInBits(); + const int SrcSize = SrcTy.getSizeInBits(); + const int WideSize = WideTy.getSizeInBits(); + const int NumMerge = (DstSize + WideSize - 1) / WideSize; + + unsigned NumOps = MI.getNumOperands(); + unsigned NumSrc = MI.getNumOperands() - 1; + unsigned PartSize = DstTy.getSizeInBits() / NumSrc; + + if (WideSize >= DstSize) { + // Directly pack the bits in the target type. + Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0); + + for (unsigned I = 2; I != NumOps; ++I) { + const unsigned Offset = (I - 1) * PartSize; + + Register SrcReg = MI.getOperand(I).getReg(); + assert(MRI.getType(SrcReg) == LLT::scalar(PartSize)); + + auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); + + Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg : + MRI.createGenericVirtualRegister(WideTy); + + auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); + auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); + MIRBuilder.buildOr(NextResult, ResultReg, Shl); + ResultReg = NextResult; + } + + if (WideSize > DstSize) + MIRBuilder.buildTrunc(DstReg, ResultReg); + else if (DstTy.isPointer()) + MIRBuilder.buildIntToPtr(DstReg, ResultReg); + + MI.eraseFromParent(); + return Legalized; + } + + // Unmerge the original values to the GCD type, and recombine to the next + // multiple greater than the original type. 
+ // + // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6 + // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0 + // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1 + // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2 + // %10:_(s6) = G_MERGE_VALUES %4, %5, %6 + // %11:_(s6) = G_MERGE_VALUES %7, %8, %9 + // %12:_(s12) = G_MERGE_VALUES %10, %11 + // + // Padding with undef if necessary: + // + // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6 + // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0 + // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1 + // %7:_(s2) = G_IMPLICIT_DEF + // %8:_(s6) = G_MERGE_VALUES %3, %4, %5 + // %9:_(s6) = G_MERGE_VALUES %6, %7, %7 + // %10:_(s12) = G_MERGE_VALUES %8, %9 + + const int GCD = greatestCommonDivisor(SrcSize, WideSize); + LLT GCDTy = LLT::scalar(GCD); + + SmallVector<Register, 8> Parts; + SmallVector<Register, 8> NewMergeRegs; + SmallVector<Register, 8> Unmerges; + LLT WideDstTy = LLT::scalar(NumMerge * WideSize); + + // Decompose the original operands if they don't evenly divide. + for (int I = 1, E = MI.getNumOperands(); I != E; ++I) { + Register SrcReg = MI.getOperand(I).getReg(); + if (GCD == SrcSize) { + Unmerges.push_back(SrcReg); + } else { + auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); + for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J) + Unmerges.push_back(Unmerge.getReg(J)); + } + } + + // Pad with undef to the next size that is a multiple of the requested size. + if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) { + Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0); + for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I) + Unmerges.push_back(UndefReg); + } + + const int PartsPerGCD = WideSize / GCD; + + // Build merges of each piece. + ArrayRef<Register> Slicer(Unmerges); + for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) { + auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD)); + NewMergeRegs.push_back(Merge.getReg(0)); + } + + // A truncate may be necessary if the requested type doesn't evenly divide the + // original result type. 
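// [Editorial aside: the bookkeeping behind the s12-from-s4 example above,
// checked in C++ (std::gcd is C++17): each s4 source splits into s2
// pieces, three pieces feed each s6 merge, and two s6 merges cover s12.]
#include <numeric>

int main() {
  constexpr int SrcSize = 4, WideSize = 6, DstSize = 12;
  constexpr int GCD = std::gcd(SrcSize, WideSize);              // 2
  constexpr int PartsPerGCD = WideSize / GCD;                   // 3
  constexpr int NumMerge = (DstSize + WideSize - 1) / WideSize; // 2
  static_assert(GCD == 2 && PartsPerGCD == 3 && NumMerge == 2, "");
  return 0;
}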
+ if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) { + MIRBuilder.buildMerge(DstReg, NewMergeRegs); + } else { + auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs); + MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0)); + } + + MI.eraseFromParent(); + return Legalized; +} + Register LegalizerHelper::widenWithUnmerge(LLT WideTy, Register OrigReg) { Register WideReg = MRI.createGenericVirtualRegister(WideTy); LLT OrigTy = MRI.getType(OrigReg); @@ -1530,82 +1530,82 @@ Register LegalizerHelper::widenWithUnmerge(LLT WideTy, Register OrigReg) { return WideReg; } -LegalizerHelper::LegalizeResult -LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, - LLT WideTy) { - if (TypeIdx != 0) - return UnableToLegalize; - - int NumDst = MI.getNumOperands() - 1; - Register SrcReg = MI.getOperand(NumDst).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - if (SrcTy.isVector()) - return UnableToLegalize; - - Register Dst0Reg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(Dst0Reg); - if (!DstTy.isScalar()) - return UnableToLegalize; - - if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) { - if (SrcTy.isPointer()) { - const DataLayout &DL = MIRBuilder.getDataLayout(); - if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) { - LLVM_DEBUG( - dbgs() << "Not casting non-integral address space integer\n"); - return UnableToLegalize; - } - - SrcTy = LLT::scalar(SrcTy.getSizeInBits()); - SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0); - } - - // Widen SrcTy to WideTy. This does not affect the result, but since the - // user requested this size, it is probably better handled than SrcTy and - // should reduce the total number of legalization artifacts - if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { - SrcTy = WideTy; - SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0); - } - - // Theres no unmerge type to target. Directly extract the bits from the - // source type - unsigned DstSize = DstTy.getSizeInBits(); - - MIRBuilder.buildTrunc(Dst0Reg, SrcReg); - for (int I = 1; I != NumDst; ++I) { - auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I); - auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt); - MIRBuilder.buildTrunc(MI.getOperand(I), Shr); - } - - MI.eraseFromParent(); - return Legalized; - } - - // Extend the source to a wider type. - LLT LCMTy = getLCMType(SrcTy, WideTy); - - Register WideSrc = SrcReg; - if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) { - // TODO: If this is an integral address space, cast to integer and anyext. 
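// [Editorial aside: the trunc/lshr loop in widenScalarUnmergeValues above,
// reduced to C++: unmerge result I of an s64 source into s16 pieces is a
// right shift by 16 * I followed by a truncate. Illustrative only.]
#include <cstdint>

uint16_t unmergePart(uint64_t Src, unsigned I) {
  return (uint16_t)(Src >> (16 * I)); // G_LSHR by DstSize * I, then G_TRUNC
}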
- if (SrcTy.isPointer()) { - LLVM_DEBUG(dbgs() << "Widening pointer source types not implemented\n"); - return UnableToLegalize; - } - - WideSrc = MIRBuilder.buildAnyExt(LCMTy, WideSrc).getReg(0); - } - - auto Unmerge = MIRBuilder.buildUnmerge(WideTy, WideSrc); - +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + int NumDst = MI.getNumOperands() - 1; + Register SrcReg = MI.getOperand(NumDst).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + if (SrcTy.isVector()) + return UnableToLegalize; + + Register Dst0Reg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(Dst0Reg); + if (!DstTy.isScalar()) + return UnableToLegalize; + + if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) { + if (SrcTy.isPointer()) { + const DataLayout &DL = MIRBuilder.getDataLayout(); + if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) { + LLVM_DEBUG( + dbgs() << "Not casting non-integral address space integer\n"); + return UnableToLegalize; + } + + SrcTy = LLT::scalar(SrcTy.getSizeInBits()); + SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0); + } + + // Widen SrcTy to WideTy. This does not affect the result, but since the + // user requested this size, it is probably better handled than SrcTy and + // should reduce the total number of legalization artifacts + if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { + SrcTy = WideTy; + SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0); + } + + // Theres no unmerge type to target. Directly extract the bits from the + // source type + unsigned DstSize = DstTy.getSizeInBits(); + + MIRBuilder.buildTrunc(Dst0Reg, SrcReg); + for (int I = 1; I != NumDst; ++I) { + auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I); + auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt); + MIRBuilder.buildTrunc(MI.getOperand(I), Shr); + } + + MI.eraseFromParent(); + return Legalized; + } + + // Extend the source to a wider type. + LLT LCMTy = getLCMType(SrcTy, WideTy); + + Register WideSrc = SrcReg; + if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) { + // TODO: If this is an integral address space, cast to integer and anyext. + if (SrcTy.isPointer()) { + LLVM_DEBUG(dbgs() << "Widening pointer source types not implemented\n"); + return UnableToLegalize; + } + + WideSrc = MIRBuilder.buildAnyExt(LCMTy, WideSrc).getReg(0); + } + + auto Unmerge = MIRBuilder.buildUnmerge(WideTy, WideSrc); + // Create a sequence of unmerges and merges to the original results. Since we // may have widened the source, we will need to pad the results with dead defs // to cover the source register. // e.g. 
widen s48 to s64: // %1:_(s48), %2:_(s48) = G_UNMERGE_VALUES %0:_(s96) - // - // => + // + // => // %4:_(s192) = G_ANYEXT %0:_(s96) // %5:_(s64), %6, %7 = G_UNMERGE_VALUES %4 ; Requested unmerge // ; unpack to GCD type, with extra dead defs @@ -1615,14 +1615,14 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, // %1:_(s48) = G_MERGE_VALUES %8:_(s16), %9, %10 ; Remerge to destination // %2:_(s48) = G_MERGE_VALUES %11:_(s16), %12, %13 ; Remerge to destination const LLT GCDTy = getGCDType(WideTy, DstTy); - const int NumUnmerge = Unmerge->getNumOperands() - 1; + const int NumUnmerge = Unmerge->getNumOperands() - 1; const int PartsPerRemerge = DstTy.getSizeInBits() / GCDTy.getSizeInBits(); - + // Directly unmerge to the destination without going through a GCD type // if possible if (PartsPerRemerge == 1) { const int PartsPerUnmerge = WideTy.getSizeInBits() / DstTy.getSizeInBits(); - + for (int I = 0; I != NumUnmerge; ++I) { auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); @@ -1634,15 +1634,15 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, // Create dead def for excess components. MIB.addDef(MRI.createGenericVirtualRegister(DstTy)); } - } + } MIB.addUse(Unmerge.getReg(I)); - } + } } else { SmallVector<Register, 16> Parts; for (int J = 0; J != NumUnmerge; ++J) extractGCDType(Parts, GCDTy, Unmerge.getReg(J)); - + SmallVector<Register, 8> RemergeParts; for (int I = 0; I != NumDst; ++I) { for (int J = 0; J < PartsPerRemerge; ++J) { @@ -1653,103 +1653,103 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, MIRBuilder.buildMerge(MI.getOperand(I).getReg(), RemergeParts); RemergeParts.clear(); } - } - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, - LLT WideTy) { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - - LLT DstTy = MRI.getType(DstReg); - unsigned Offset = MI.getOperand(2).getImm(); - - if (TypeIdx == 0) { - if (SrcTy.isVector() || DstTy.isVector()) - return UnableToLegalize; - - SrcOp Src(SrcReg); - if (SrcTy.isPointer()) { - // Extracts from pointers can be handled only if they are really just - // simple integers. - const DataLayout &DL = MIRBuilder.getDataLayout(); - if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) - return UnableToLegalize; - - LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits()); - Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src); - SrcTy = SrcAsIntTy; - } - - if (DstTy.isPointer()) - return UnableToLegalize; - - if (Offset == 0) { - // Avoid a shift in the degenerate case. - MIRBuilder.buildTrunc(DstReg, - MIRBuilder.buildAnyExtOrTrunc(WideTy, Src)); - MI.eraseFromParent(); - return Legalized; - } - - // Do a shift in the source type. 
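// [Editorial aside: the shift about to be built: a G_EXTRACT of an s8 at
// bit Offset from an s32 is a logical right shift by Offset plus a
// truncate; Offset == 0 skips the shift, as handled above. A sketch only.]
#include <cstdint>

uint8_t extractBits(uint32_t Src, unsigned Offset) {
  return (uint8_t)(Src >> Offset); // G_LSHR, then G_TRUNC to the result type
}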
- LLT ShiftTy = SrcTy; - if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { - Src = MIRBuilder.buildAnyExt(WideTy, Src); - ShiftTy = WideTy; + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + LLT DstTy = MRI.getType(DstReg); + unsigned Offset = MI.getOperand(2).getImm(); + + if (TypeIdx == 0) { + if (SrcTy.isVector() || DstTy.isVector()) + return UnableToLegalize; + + SrcOp Src(SrcReg); + if (SrcTy.isPointer()) { + // Extracts from pointers can be handled only if they are really just + // simple integers. + const DataLayout &DL = MIRBuilder.getDataLayout(); + if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) + return UnableToLegalize; + + LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits()); + Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src); + SrcTy = SrcAsIntTy; + } + + if (DstTy.isPointer()) + return UnableToLegalize; + + if (Offset == 0) { + // Avoid a shift in the degenerate case. + MIRBuilder.buildTrunc(DstReg, + MIRBuilder.buildAnyExtOrTrunc(WideTy, Src)); + MI.eraseFromParent(); + return Legalized; + } + + // Do a shift in the source type. + LLT ShiftTy = SrcTy; + if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { + Src = MIRBuilder.buildAnyExt(WideTy, Src); + ShiftTy = WideTy; } - - auto LShr = MIRBuilder.buildLShr( - ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset)); - MIRBuilder.buildTrunc(DstReg, LShr); - MI.eraseFromParent(); - return Legalized; - } - - if (SrcTy.isScalar()) { - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - Observer.changedInstr(MI); - return Legalized; - } - - if (!SrcTy.isVector()) - return UnableToLegalize; - - if (DstTy != SrcTy.getElementType()) - return UnableToLegalize; - - if (Offset % SrcTy.getScalarSizeInBits() != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - - MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) * - Offset); - widenScalarDst(MI, WideTy.getScalarType(), 0); - Observer.changedInstr(MI); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, - LLT WideTy) { + + auto LShr = MIRBuilder.buildLShr( + ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset)); + MIRBuilder.buildTrunc(DstReg, LShr); + MI.eraseFromParent(); + return Legalized; + } + + if (SrcTy.isScalar()) { + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + Observer.changedInstr(MI); + return Legalized; + } + + if (!SrcTy.isVector()) + return UnableToLegalize; + + if (DstTy != SrcTy.getElementType()) + return UnableToLegalize; + + if (Offset % SrcTy.getScalarSizeInBits() != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + + MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) * + Offset); + widenScalarDst(MI, WideTy.getScalarType(), 0); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, + LLT WideTy) { if (TypeIdx != 0 || WideTy.isVector()) - return UnableToLegalize; - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - 
widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; -} - -LegalizerHelper::LegalizeResult + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { if (TypeIdx == 1) @@ -1780,503 +1780,503 @@ LegalizerHelper::widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx, LegalizerHelper::LegalizeResult LegalizerHelper::widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { - bool IsSigned = MI.getOpcode() == TargetOpcode::G_SADDSAT || + bool IsSigned = MI.getOpcode() == TargetOpcode::G_SADDSAT || MI.getOpcode() == TargetOpcode::G_SSUBSAT || MI.getOpcode() == TargetOpcode::G_SSHLSAT; bool IsShift = MI.getOpcode() == TargetOpcode::G_SSHLSAT || MI.getOpcode() == TargetOpcode::G_USHLSAT; - // We can convert this to: - // 1. Any extend iN to iM - // 2. SHL by M-N + // We can convert this to: + // 1. Any extend iN to iM + // 2. SHL by M-N // 3. [US][ADD|SUB|SHL]SAT - // 4. L/ASHR by M-N - // - // It may be more efficient to lower this to a min and a max operation in - // the higher precision arithmetic if the promoted operation isn't legal, - // but this decision is up to the target's lowering request. - Register DstReg = MI.getOperand(0).getReg(); - - unsigned NewBits = WideTy.getScalarSizeInBits(); - unsigned SHLAmount = NewBits - MRI.getType(DstReg).getScalarSizeInBits(); - + // 4. L/ASHR by M-N + // + // It may be more efficient to lower this to a min and a max operation in + // the higher precision arithmetic if the promoted operation isn't legal, + // but this decision is up to the target's lowering request. + Register DstReg = MI.getOperand(0).getReg(); + + unsigned NewBits = WideTy.getScalarSizeInBits(); + unsigned SHLAmount = NewBits - MRI.getType(DstReg).getScalarSizeInBits(); + // Shifts must zero-extend the RHS to preserve the unsigned quantity, and // must not left shift the RHS to preserve the shift amount. - auto LHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(1)); + auto LHS = MIRBuilder.buildAnyExt(WideTy, MI.getOperand(1)); auto RHS = IsShift ? MIRBuilder.buildZExt(WideTy, MI.getOperand(2)) : MIRBuilder.buildAnyExt(WideTy, MI.getOperand(2)); - auto ShiftK = MIRBuilder.buildConstant(WideTy, SHLAmount); - auto ShiftL = MIRBuilder.buildShl(WideTy, LHS, ShiftK); + auto ShiftK = MIRBuilder.buildConstant(WideTy, SHLAmount); + auto ShiftL = MIRBuilder.buildShl(WideTy, LHS, ShiftK); auto ShiftR = IsShift ? RHS : MIRBuilder.buildShl(WideTy, RHS, ShiftK); - - auto WideInst = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, - {ShiftL, ShiftR}, MI.getFlags()); - - // Use a shift that will preserve the number of sign bits when the trunc is - // folded away. - auto Result = IsSigned ? 
MIRBuilder.buildAShr(WideTy, WideInst, ShiftK) - : MIRBuilder.buildLShr(WideTy, WideInst, ShiftK); - - MIRBuilder.buildTrunc(DstReg, Result); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { - switch (MI.getOpcode()) { - default: - return UnableToLegalize; - case TargetOpcode::G_EXTRACT: - return widenScalarExtract(MI, TypeIdx, WideTy); - case TargetOpcode::G_INSERT: - return widenScalarInsert(MI, TypeIdx, WideTy); - case TargetOpcode::G_MERGE_VALUES: - return widenScalarMergeValues(MI, TypeIdx, WideTy); - case TargetOpcode::G_UNMERGE_VALUES: - return widenScalarUnmergeValues(MI, TypeIdx, WideTy); + + auto WideInst = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, + {ShiftL, ShiftR}, MI.getFlags()); + + // Use a shift that will preserve the number of sign bits when the trunc is + // folded away. + auto Result = IsSigned ? MIRBuilder.buildAShr(WideTy, WideInst, ShiftK) + : MIRBuilder.buildLShr(WideTy, WideInst, ShiftK); + + MIRBuilder.buildTrunc(DstReg, Result); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { + switch (MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_EXTRACT: + return widenScalarExtract(MI, TypeIdx, WideTy); + case TargetOpcode::G_INSERT: + return widenScalarInsert(MI, TypeIdx, WideTy); + case TargetOpcode::G_MERGE_VALUES: + return widenScalarMergeValues(MI, TypeIdx, WideTy); + case TargetOpcode::G_UNMERGE_VALUES: + return widenScalarUnmergeValues(MI, TypeIdx, WideTy); case TargetOpcode::G_SADDO: case TargetOpcode::G_SSUBO: - case TargetOpcode::G_UADDO: + case TargetOpcode::G_UADDO: case TargetOpcode::G_USUBO: return widenScalarAddoSubo(MI, TypeIdx, WideTy); - case TargetOpcode::G_SADDSAT: - case TargetOpcode::G_SSUBSAT: + case TargetOpcode::G_SADDSAT: + case TargetOpcode::G_SSUBSAT: case TargetOpcode::G_SSHLSAT: - case TargetOpcode::G_UADDSAT: - case TargetOpcode::G_USUBSAT: + case TargetOpcode::G_UADDSAT: + case TargetOpcode::G_USUBSAT: case TargetOpcode::G_USHLSAT: return widenScalarAddSubShlSat(MI, TypeIdx, WideTy); - case TargetOpcode::G_CTTZ: - case TargetOpcode::G_CTTZ_ZERO_UNDEF: - case TargetOpcode::G_CTLZ: - case TargetOpcode::G_CTLZ_ZERO_UNDEF: - case TargetOpcode::G_CTPOP: { - if (TypeIdx == 0) { - Observer.changingInstr(MI); - widenScalarDst(MI, WideTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - - Register SrcReg = MI.getOperand(1).getReg(); - - // First ZEXT the input. - auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg); - LLT CurTy = MRI.getType(SrcReg); - if (MI.getOpcode() == TargetOpcode::G_CTTZ) { - // The count is the same in the larger type except if the original - // value was zero. This can be handled by setting the bit just off - // the top of the original type. - auto TopBit = - APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits()); - MIBSrc = MIRBuilder.buildOr( - WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit)); - } - - // Perform the operation at the larger size. - auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc}); - // This is already the correct result for CTPOP and CTTZs - if (MI.getOpcode() == TargetOpcode::G_CTLZ || - MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) { - // The correct result is NewOp - (Difference in widety and current ty). 
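// [Editorial aside: the adjustment described in the comment above, as C++
// for an s16 count done in s32; __builtin_clz is a GCC/Clang builtin used
// for brevity and is undefined at zero, hence the guard. For G_CTTZ the
// code instead ORs in a bit just above the original MSB so a zero input
// still yields the original bit width.]
#include <cstdint>

unsigned ctlz16Via32(uint16_t X) {
  if (X == 0)
    return 16;                                   // all 16 bits are zero
  return __builtin_clz((uint32_t)X) - (32 - 16); // wide count minus SizeDiff
}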
- unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits(); - MIBNewOp = MIRBuilder.buildSub( - WideTy, MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)); - } - - MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_BSWAP: { - Observer.changingInstr(MI); - Register DstReg = MI.getOperand(0).getReg(); - - Register ShrReg = MRI.createGenericVirtualRegister(WideTy); - Register DstExt = MRI.createGenericVirtualRegister(WideTy); - Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - - MI.getOperand(0).setReg(DstExt); - - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - - LLT Ty = MRI.getType(DstReg); - unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits(); - MIRBuilder.buildConstant(ShiftAmtReg, DiffBits); - MIRBuilder.buildLShr(ShrReg, DstExt, ShiftAmtReg); - - MIRBuilder.buildTrunc(DstReg, ShrReg); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_BITREVERSE: { - Observer.changingInstr(MI); - - Register DstReg = MI.getOperand(0).getReg(); - LLT Ty = MRI.getType(DstReg); - unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits(); - - Register DstExt = MRI.createGenericVirtualRegister(WideTy); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - MI.getOperand(0).setReg(DstExt); - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - - auto ShiftAmt = MIRBuilder.buildConstant(WideTy, DiffBits); - auto Shift = MIRBuilder.buildLShr(WideTy, DstExt, ShiftAmt); - MIRBuilder.buildTrunc(DstReg, Shift); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_FREEZE: - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_ADD: - case TargetOpcode::G_AND: - case TargetOpcode::G_MUL: - case TargetOpcode::G_OR: - case TargetOpcode::G_XOR: - case TargetOpcode::G_SUB: - // Perform operation at larger width (any extension is fines here, high bits - // don't affect the result) and then truncate the result back to the - // original type. - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_SHL: - Observer.changingInstr(MI); - - if (TypeIdx == 0) { - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideTy); - } else { - assert(TypeIdx == 1); - // The "number of bits to shift" operand must preserve its value as an - // unsigned integer: - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); - } - - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_SDIV: - case TargetOpcode::G_SREM: - case TargetOpcode::G_SMIN: - case TargetOpcode::G_SMAX: - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_ASHR: - case TargetOpcode::G_LSHR: - Observer.changingInstr(MI); - - if (TypeIdx == 0) { - unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ? 
- TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; - - widenScalarSrc(MI, WideTy, 1, CvtOp); - widenScalarDst(MI, WideTy); - } else { - assert(TypeIdx == 1); - // The "number of bits to shift" operand must preserve its value as an - // unsigned integer: - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); - } - - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_UDIV: - case TargetOpcode::G_UREM: - case TargetOpcode::G_UMIN: - case TargetOpcode::G_UMAX: - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_SELECT: - Observer.changingInstr(MI); - if (TypeIdx == 0) { - // Perform operation at larger width (any extension is fine here, high - // bits don't affect the result) and then truncate the result back to the - // original type. - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); - widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideTy); - } else { - bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector(); - // Explicit extension is required here since high bits affect the result. - widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false)); - } - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_FPTOSI: - case TargetOpcode::G_FPTOUI: - Observer.changingInstr(MI); - - if (TypeIdx == 0) - widenScalarDst(MI, WideTy); - else - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT); - - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_SITOFP: - Observer.changingInstr(MI); + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTPOP: { + if (TypeIdx == 0) { + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + + Register SrcReg = MI.getOperand(1).getReg(); + + // First ZEXT the input. + auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg); + LLT CurTy = MRI.getType(SrcReg); + if (MI.getOpcode() == TargetOpcode::G_CTTZ) { + // The count is the same in the larger type except if the original + // value was zero. This can be handled by setting the bit just off + // the top of the original type. + auto TopBit = + APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits()); + MIBSrc = MIRBuilder.buildOr( + WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit)); + } + + // Perform the operation at the larger size. + auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc}); + // This is already the correct result for CTPOP and CTTZs + if (MI.getOpcode() == TargetOpcode::G_CTLZ || + MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) { + // The correct result is NewOp - (Difference in widety and current ty). 
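// [Editorial aside: looking back at widenScalarAddSubShlSat above, a C++
// sketch of the recipe for an s8 G_SADDSAT done at s16: shift both inputs
// into the top bits, saturate at the wide width, shift back down. The
// explicit clamp stands in for the wide G_SADDSAT instruction.]
#include <cstdint>

int8_t saddsat8Via16(int8_t A, int8_t B) {
  int32_t Wide = A * 256 + B * 256;       // both inputs shifted up by 16 - 8
  if (Wide > INT16_MAX) Wide = INT16_MAX; // saturation at the s16 bounds
  if (Wide < INT16_MIN) Wide = INT16_MIN;
  return (int8_t)(Wide >> 8);             // G_ASHR by 16 - 8, then G_TRUNC
}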
+ unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits();
+ MIBNewOp = MIRBuilder.buildSub(
+ WideTy, MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff));
+ }
+
+ MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp);
+ MI.eraseFromParent();
+ return Legalized;
+ }
+ case TargetOpcode::G_BSWAP: {
+ Observer.changingInstr(MI);
+ Register DstReg = MI.getOperand(0).getReg();
+
+ Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
+ Register DstExt = MRI.createGenericVirtualRegister(WideTy);
+ Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+
+ MI.getOperand(0).setReg(DstExt);
+
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+
+ LLT Ty = MRI.getType(DstReg);
+ unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
+ MIRBuilder.buildConstant(ShiftAmtReg, DiffBits);
+ MIRBuilder.buildLShr(ShrReg, DstExt, ShiftAmtReg);
+
+ MIRBuilder.buildTrunc(DstReg, ShrReg);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_BITREVERSE: {
+ Observer.changingInstr(MI);
+
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(DstReg);
+ unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits();
+
+ Register DstExt = MRI.createGenericVirtualRegister(WideTy);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ MI.getOperand(0).setReg(DstExt);
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
+
+ auto ShiftAmt = MIRBuilder.buildConstant(WideTy, DiffBits);
+ auto Shift = MIRBuilder.buildLShr(WideTy, DstExt, ShiftAmt);
+ MIRBuilder.buildTrunc(DstReg, Shift);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ case TargetOpcode::G_FREEZE:
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+
+ case TargetOpcode::G_ADD:
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_MUL:
+ case TargetOpcode::G_OR:
+ case TargetOpcode::G_XOR:
+ case TargetOpcode::G_SUB:
+ // Perform operation at larger width (any extension is fine here, high bits
+ // don't affect the result) and then truncate the result back to the
+ // original type.
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+
+ case TargetOpcode::G_SHL:
+ Observer.changingInstr(MI);
+
+ if (TypeIdx == 0) {
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ } else {
+ assert(TypeIdx == 1);
+ // The "number of bits to shift" operand must preserve its value as an
+ // unsigned integer:
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ }
+
+ Observer.changedInstr(MI);
+ return Legalized;
+
+ case TargetOpcode::G_SDIV:
+ case TargetOpcode::G_SREM:
+ case TargetOpcode::G_SMIN:
+ case TargetOpcode::G_SMAX:
+ Observer.changingInstr(MI);
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
+ Observer.changingInstr(MI);
+
+ if (TypeIdx == 0) {
+ unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ?
+ TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; + + widenScalarSrc(MI, WideTy, 1, CvtOp); + widenScalarDst(MI, WideTy); + } else { + assert(TypeIdx == 1); + // The "number of bits to shift" operand must preserve its value as an + // unsigned integer: + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + } + + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_UDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_SELECT: + Observer.changingInstr(MI); + if (TypeIdx == 0) { + // Perform operation at larger width (any extension is fine here, high + // bits don't affect the result) and then truncate the result back to the + // original type. + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + } else { + bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector(); + // Explicit extension is required here since high bits affect the result. + widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false)); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_FPTOSI: + case TargetOpcode::G_FPTOUI: + Observer.changingInstr(MI); + + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_FPEXT); + + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_SITOFP: + Observer.changingInstr(MI); if (TypeIdx == 0) widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); else widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_UITOFP: - Observer.changingInstr(MI); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_UITOFP: + Observer.changingInstr(MI); if (TypeIdx == 0) widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); else widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_LOAD: - case TargetOpcode::G_SEXTLOAD: - case TargetOpcode::G_ZEXTLOAD: - Observer.changingInstr(MI); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_STORE: { - if (TypeIdx != 0) - return UnableToLegalize; - - LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_LOAD: + case TargetOpcode::G_SEXTLOAD: + case TargetOpcode::G_ZEXTLOAD: + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_STORE: { + if (TypeIdx != 0) + return UnableToLegalize; + + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); if (!Ty.isScalar()) - return UnableToLegalize; - - Observer.changingInstr(MI); - - unsigned ExtType = Ty.getScalarSizeInBits() == 1 ? 
- TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT; - widenScalarSrc(MI, WideTy, 0, ExtType); - - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_CONSTANT: { - MachineOperand &SrcMO = MI.getOperand(1); - LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); - unsigned ExtOpc = LI.getExtOpcodeForWideningConstant( - MRI.getType(MI.getOperand(0).getReg())); - assert((ExtOpc == TargetOpcode::G_ZEXT || ExtOpc == TargetOpcode::G_SEXT || - ExtOpc == TargetOpcode::G_ANYEXT) && - "Illegal Extend"); - const APInt &SrcVal = SrcMO.getCImm()->getValue(); - const APInt &Val = (ExtOpc == TargetOpcode::G_SEXT) - ? SrcVal.sext(WideTy.getSizeInBits()) - : SrcVal.zext(WideTy.getSizeInBits()); - Observer.changingInstr(MI); - SrcMO.setCImm(ConstantInt::get(Ctx, Val)); - - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_FCONSTANT: { - MachineOperand &SrcMO = MI.getOperand(1); - LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); - APFloat Val = SrcMO.getFPImm()->getValueAPF(); - bool LosesInfo; - switch (WideTy.getSizeInBits()) { - case 32: - Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, - &LosesInfo); - break; - case 64: - Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, - &LosesInfo); - break; - default: - return UnableToLegalize; - } - - assert(!LosesInfo && "extend should always be lossless"); - - Observer.changingInstr(MI); - SrcMO.setFPImm(ConstantFP::get(Ctx, Val)); - - widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_IMPLICIT_DEF: { - Observer.changingInstr(MI); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_BRCOND: - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false)); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_FCMP: - Observer.changingInstr(MI); - if (TypeIdx == 0) - widenScalarDst(MI, WideTy); - else { - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT); - widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT); - } - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_ICMP: - Observer.changingInstr(MI); - if (TypeIdx == 0) - widenScalarDst(MI, WideTy); - else { - unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>( - MI.getOperand(1).getPredicate())) - ? 
TargetOpcode::G_SEXT - : TargetOpcode::G_ZEXT; - widenScalarSrc(MI, WideTy, 2, ExtOpcode); - widenScalarSrc(MI, WideTy, 3, ExtOpcode); - } - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_PTR_ADD: - assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD"); - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); - Observer.changedInstr(MI); - return Legalized; - - case TargetOpcode::G_PHI: { - assert(TypeIdx == 0 && "Expecting only Idx 0"); - - Observer.changingInstr(MI); - for (unsigned I = 1; I < MI.getNumOperands(); I += 2) { - MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); - MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); - widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT); - } - - MachineBasicBlock &MBB = *MI.getParent(); - MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); - widenScalarDst(MI, WideTy); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_EXTRACT_VECTOR_ELT: { - if (TypeIdx == 0) { - Register VecReg = MI.getOperand(1).getReg(); - LLT VecTy = MRI.getType(VecReg); - Observer.changingInstr(MI); - - widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(), - WideTy.getSizeInBits()), - 1, TargetOpcode::G_SEXT); - - widenScalarDst(MI, WideTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - - if (TypeIdx != 2) - return UnableToLegalize; - Observer.changingInstr(MI); - // TODO: Probably should be zext - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_INSERT_VECTOR_ELT: { - if (TypeIdx == 1) { - Observer.changingInstr(MI); - - Register VecReg = MI.getOperand(1).getReg(); - LLT VecTy = MRI.getType(VecReg); - LLT WideVecTy = LLT::vector(VecTy.getNumElements(), WideTy); - - widenScalarSrc(MI, WideVecTy, 1, TargetOpcode::G_ANYEXT); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideVecTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - - if (TypeIdx == 2) { - Observer.changingInstr(MI); - // TODO: Probably should be zext - widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT); - Observer.changedInstr(MI); - return Legalized; - } - - return UnableToLegalize; - } - case TargetOpcode::G_FADD: - case TargetOpcode::G_FMUL: - case TargetOpcode::G_FSUB: - case TargetOpcode::G_FMA: - case TargetOpcode::G_FMAD: - case TargetOpcode::G_FNEG: - case TargetOpcode::G_FABS: - case TargetOpcode::G_FCANONICALIZE: - case TargetOpcode::G_FMINNUM: - case TargetOpcode::G_FMAXNUM: - case TargetOpcode::G_FMINNUM_IEEE: - case TargetOpcode::G_FMAXNUM_IEEE: - case TargetOpcode::G_FMINIMUM: - case TargetOpcode::G_FMAXIMUM: - case TargetOpcode::G_FDIV: - case TargetOpcode::G_FREM: - case TargetOpcode::G_FCEIL: - case TargetOpcode::G_FFLOOR: - case TargetOpcode::G_FCOS: - case TargetOpcode::G_FSIN: - case TargetOpcode::G_FLOG10: - case TargetOpcode::G_FLOG: - case TargetOpcode::G_FLOG2: - case TargetOpcode::G_FRINT: - case TargetOpcode::G_FNEARBYINT: - case TargetOpcode::G_FSQRT: - case TargetOpcode::G_FEXP: - case TargetOpcode::G_FEXP2: - case TargetOpcode::G_FPOW: - case TargetOpcode::G_INTRINSIC_TRUNC: - case TargetOpcode::G_INTRINSIC_ROUND: + return UnableToLegalize; + + Observer.changingInstr(MI); + + unsigned ExtType = Ty.getScalarSizeInBits() == 1 ? 
+ TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT; + widenScalarSrc(MI, WideTy, 0, ExtType); + + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_CONSTANT: { + MachineOperand &SrcMO = MI.getOperand(1); + LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); + unsigned ExtOpc = LI.getExtOpcodeForWideningConstant( + MRI.getType(MI.getOperand(0).getReg())); + assert((ExtOpc == TargetOpcode::G_ZEXT || ExtOpc == TargetOpcode::G_SEXT || + ExtOpc == TargetOpcode::G_ANYEXT) && + "Illegal Extend"); + const APInt &SrcVal = SrcMO.getCImm()->getValue(); + const APInt &Val = (ExtOpc == TargetOpcode::G_SEXT) + ? SrcVal.sext(WideTy.getSizeInBits()) + : SrcVal.zext(WideTy.getSizeInBits()); + Observer.changingInstr(MI); + SrcMO.setCImm(ConstantInt::get(Ctx, Val)); + + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_FCONSTANT: { + MachineOperand &SrcMO = MI.getOperand(1); + LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); + APFloat Val = SrcMO.getFPImm()->getValueAPF(); + bool LosesInfo; + switch (WideTy.getSizeInBits()) { + case 32: + Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, + &LosesInfo); + break; + case 64: + Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, + &LosesInfo); + break; + default: + return UnableToLegalize; + } + + assert(!LosesInfo && "extend should always be lossless"); + + Observer.changingInstr(MI); + SrcMO.setFPImm(ConstantFP::get(Ctx, Val)); + + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_IMPLICIT_DEF: { + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_BRCOND: + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false)); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_FCMP: + Observer.changingInstr(MI); + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else { + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT); + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_ICMP: + Observer.changingInstr(MI); + if (TypeIdx == 0) + widenScalarDst(MI, WideTy); + else { + unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>( + MI.getOperand(1).getPredicate())) + ? 
TargetOpcode::G_SEXT + : TargetOpcode::G_ZEXT; + widenScalarSrc(MI, WideTy, 2, ExtOpcode); + widenScalarSrc(MI, WideTy, 3, ExtOpcode); + } + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_PTR_ADD: + assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD"); + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + + case TargetOpcode::G_PHI: { + assert(TypeIdx == 0 && "Expecting only Idx 0"); + + Observer.changingInstr(MI); + for (unsigned I = 1; I < MI.getNumOperands(); I += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT); + } + + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); + widenScalarDst(MI, WideTy); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_EXTRACT_VECTOR_ELT: { + if (TypeIdx == 0) { + Register VecReg = MI.getOperand(1).getReg(); + LLT VecTy = MRI.getType(VecReg); + Observer.changingInstr(MI); + + widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(), + WideTy.getSizeInBits()), + 1, TargetOpcode::G_SEXT); + + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + + if (TypeIdx != 2) + return UnableToLegalize; + Observer.changingInstr(MI); + // TODO: Probably should be zext + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_INSERT_VECTOR_ELT: { + if (TypeIdx == 1) { + Observer.changingInstr(MI); + + Register VecReg = MI.getOperand(1).getReg(); + LLT VecTy = MRI.getType(VecReg); + LLT WideVecTy = LLT::vector(VecTy.getNumElements(), WideTy); + + widenScalarSrc(MI, WideVecTy, 1, TargetOpcode::G_ANYEXT); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideVecTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + + if (TypeIdx == 2) { + Observer.changingInstr(MI); + // TODO: Probably should be zext + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT); + Observer.changedInstr(MI); + return Legalized; + } + + return UnableToLegalize; + } + case TargetOpcode::G_FADD: + case TargetOpcode::G_FMUL: + case TargetOpcode::G_FSUB: + case TargetOpcode::G_FMA: + case TargetOpcode::G_FMAD: + case TargetOpcode::G_FNEG: + case TargetOpcode::G_FABS: + case TargetOpcode::G_FCANONICALIZE: + case TargetOpcode::G_FMINNUM: + case TargetOpcode::G_FMAXNUM: + case TargetOpcode::G_FMINNUM_IEEE: + case TargetOpcode::G_FMAXNUM_IEEE: + case TargetOpcode::G_FMINIMUM: + case TargetOpcode::G_FMAXIMUM: + case TargetOpcode::G_FDIV: + case TargetOpcode::G_FREM: + case TargetOpcode::G_FCEIL: + case TargetOpcode::G_FFLOOR: + case TargetOpcode::G_FCOS: + case TargetOpcode::G_FSIN: + case TargetOpcode::G_FLOG10: + case TargetOpcode::G_FLOG: + case TargetOpcode::G_FLOG2: + case TargetOpcode::G_FRINT: + case TargetOpcode::G_FNEARBYINT: + case TargetOpcode::G_FSQRT: + case TargetOpcode::G_FEXP: + case TargetOpcode::G_FEXP2: + case TargetOpcode::G_FPOW: + case TargetOpcode::G_INTRINSIC_TRUNC: + case TargetOpcode::G_INTRINSIC_ROUND: case TargetOpcode::G_INTRINSIC_ROUNDEVEN: - assert(TypeIdx == 0); - Observer.changingInstr(MI); - - for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) - widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT); - - widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); - Observer.changedInstr(MI); - return Legalized; + 
assert(TypeIdx == 0); + Observer.changingInstr(MI); + + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) + widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT); + + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); + Observer.changedInstr(MI); + return Legalized; case TargetOpcode::G_FPOWI: { if (TypeIdx != 0) return UnableToLegalize; @@ -2286,135 +2286,135 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { Observer.changedInstr(MI); return Legalized; } - case TargetOpcode::G_INTTOPTR: - if (TypeIdx != 1) - return UnableToLegalize; - - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_PTRTOINT: - if (TypeIdx != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - widenScalarDst(MI, WideTy, 0); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_BUILD_VECTOR: { - Observer.changingInstr(MI); - - const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType(); - for (int I = 1, E = MI.getNumOperands(); I != E; ++I) - widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT); - - // Avoid changing the result vector type if the source element type was - // requested. - if (TypeIdx == 1) { + case TargetOpcode::G_INTTOPTR: + if (TypeIdx != 1) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PTRTOINT: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarDst(MI, WideTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_BUILD_VECTOR: { + Observer.changingInstr(MI); + + const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType(); + for (int I = 1, E = MI.getNumOperands(); I != E; ++I) + widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT); + + // Avoid changing the result vector type if the source element type was + // requested. 
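Editor's note: the branch below picks G_BUILD_VECTOR_TRUNC when only the source element type was widened; the round trip is lossless because an anyext followed by a trunc back to the original width preserves every original bit. A minimal scalar sketch of that round trip, with made-up s16/s32 widths and an arbitrary high half standing in for the "any" bits:

// Illustrative sketch of the anyext-then-trunc round trip behind
// G_BUILD_VECTOR_TRUNC; the widths and the 0xDEAD filler are arbitrary.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t V = 0; V <= 0xFFFF; ++V) {
    uint16_t Elt = static_cast<uint16_t>(V);     // original s16 element
    uint32_t Wide = 0xDEAD0000u | Elt;           // "anyext": high bits arbitrary
    uint16_t Back = static_cast<uint16_t>(Wide); // trunc back to s16
    assert(Back == Elt); // low bits survive, so the result can stay s16
  }
  return 0;
}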
+ if (TypeIdx == 1) { MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BUILD_VECTOR_TRUNC)); - } else { - widenScalarDst(MI, WideTy, 0); - } - - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_SEXT_INREG: - if (TypeIdx != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); - widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_PTRMASK: { - if (TypeIdx != 1) - return UnableToLegalize; - Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); - Observer.changedInstr(MI); - return Legalized; - } - } -} - -static void getUnmergePieces(SmallVectorImpl<Register> &Pieces, - MachineIRBuilder &B, Register Src, LLT Ty) { - auto Unmerge = B.buildUnmerge(Ty, Src); - for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I) - Pieces.push_back(Unmerge.getReg(I)); -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerBitcast(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - - if (SrcTy.isVector()) { - LLT SrcEltTy = SrcTy.getElementType(); - SmallVector<Register, 8> SrcRegs; - - if (DstTy.isVector()) { - int NumDstElt = DstTy.getNumElements(); - int NumSrcElt = SrcTy.getNumElements(); - - LLT DstEltTy = DstTy.getElementType(); - LLT DstCastTy = DstEltTy; // Intermediate bitcast result type - LLT SrcPartTy = SrcEltTy; // Original unmerge result type. - - // If there's an element size mismatch, insert intermediate casts to match - // the result element type. - if (NumSrcElt < NumDstElt) { // Source element type is larger. - // %1:_(<4 x s8>) = G_BITCAST %0:_(<2 x s16>) - // - // => - // - // %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0 - // %3:_(<2 x s8>) = G_BITCAST %2 - // %4:_(<2 x s8>) = G_BITCAST %3 - // %1:_(<4 x s16>) = G_CONCAT_VECTORS %3, %4 - DstCastTy = LLT::vector(NumDstElt / NumSrcElt, DstEltTy); - SrcPartTy = SrcEltTy; - } else if (NumSrcElt > NumDstElt) { // Source element type is smaller. 
- // - // %1:_(<2 x s16>) = G_BITCAST %0:_(<4 x s8>) - // - // => - // - // %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0 - // %3:_(s16) = G_BITCAST %2 - // %4:_(s16) = G_BITCAST %3 - // %1:_(<2 x s16>) = G_BUILD_VECTOR %3, %4 - SrcPartTy = LLT::vector(NumSrcElt / NumDstElt, SrcEltTy); - DstCastTy = DstEltTy; - } - - getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcPartTy); - for (Register &SrcReg : SrcRegs) - SrcReg = MIRBuilder.buildBitcast(DstCastTy, SrcReg).getReg(0); - } else - getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy); - - MIRBuilder.buildMerge(Dst, SrcRegs); - MI.eraseFromParent(); - return Legalized; - } - - if (DstTy.isVector()) { - SmallVector<Register, 8> SrcRegs; - getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType()); - MIRBuilder.buildMerge(Dst, SrcRegs); - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - + } else { + widenScalarDst(MI, WideTy, 0); + } + + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_SEXT_INREG: + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_TRUNC); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_PTRMASK: { + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + Observer.changedInstr(MI); + return Legalized; + } + } +} + +static void getUnmergePieces(SmallVectorImpl<Register> &Pieces, + MachineIRBuilder &B, Register Src, LLT Ty) { + auto Unmerge = B.buildUnmerge(Ty, Src); + for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I) + Pieces.push_back(Unmerge.getReg(I)); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerBitcast(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + + if (SrcTy.isVector()) { + LLT SrcEltTy = SrcTy.getElementType(); + SmallVector<Register, 8> SrcRegs; + + if (DstTy.isVector()) { + int NumDstElt = DstTy.getNumElements(); + int NumSrcElt = SrcTy.getNumElements(); + + LLT DstEltTy = DstTy.getElementType(); + LLT DstCastTy = DstEltTy; // Intermediate bitcast result type + LLT SrcPartTy = SrcEltTy; // Original unmerge result type. + + // If there's an element size mismatch, insert intermediate casts to match + // the result element type. + if (NumSrcElt < NumDstElt) { // Source element type is larger. + // %1:_(<4 x s8>) = G_BITCAST %0:_(<2 x s16>) + // + // => + // + // %2:_(s16), %3:_(s16) = G_UNMERGE_VALUES %0 + // %3:_(<2 x s8>) = G_BITCAST %2 + // %4:_(<2 x s8>) = G_BITCAST %3 + // %1:_(<4 x s16>) = G_CONCAT_VECTORS %3, %4 + DstCastTy = LLT::vector(NumDstElt / NumSrcElt, DstEltTy); + SrcPartTy = SrcEltTy; + } else if (NumSrcElt > NumDstElt) { // Source element type is smaller. 
+ // + // %1:_(<2 x s16>) = G_BITCAST %0:_(<4 x s8>) + // + // => + // + // %2:_(<2 x s8>), %3:_(<2 x s8>) = G_UNMERGE_VALUES %0 + // %3:_(s16) = G_BITCAST %2 + // %4:_(s16) = G_BITCAST %3 + // %1:_(<2 x s16>) = G_BUILD_VECTOR %3, %4 + SrcPartTy = LLT::vector(NumSrcElt / NumDstElt, SrcEltTy); + DstCastTy = DstEltTy; + } + + getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcPartTy); + for (Register &SrcReg : SrcRegs) + SrcReg = MIRBuilder.buildBitcast(DstCastTy, SrcReg).getReg(0); + } else + getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy); + + MIRBuilder.buildMerge(Dst, SrcRegs); + MI.eraseFromParent(); + return Legalized; + } + + if (DstTy.isVector()) { + SmallVector<Register, 8> SrcRegs; + getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType()); + MIRBuilder.buildMerge(Dst, SrcRegs); + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + /// Figure out the bit offset into a register when coercing a vector index for /// the wide element type. This is only for the case when promoting vector to /// one with larger elements. @@ -2443,7 +2443,7 @@ static Register getBitcastWiderVectorElementOffset(MachineIRBuilder &B, /// elements, index the bitcasted vector and extract the target element with bit /// operations. This is intended to force the indexing in the native register /// size for architectures that can dynamically index the register file. -LegalizerHelper::LegalizeResult +LegalizerHelper::LegalizeResult LegalizerHelper::bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) { if (TypeIdx != 1) @@ -2786,62 +2786,62 @@ LegalizerHelper::lowerStore(MachineInstr &MI) { } LegalizerHelper::LegalizeResult -LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) { - switch (MI.getOpcode()) { - case TargetOpcode::G_LOAD: { - if (TypeIdx != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - bitcastDst(MI, CastTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_STORE: { - if (TypeIdx != 0) - return UnableToLegalize; - - Observer.changingInstr(MI); - bitcastSrc(MI, CastTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_SELECT: { - if (TypeIdx != 0) - return UnableToLegalize; - - if (MRI.getType(MI.getOperand(1).getReg()).isVector()) { - LLVM_DEBUG( - dbgs() << "bitcast action not implemented for vector select\n"); - return UnableToLegalize; - } - - Observer.changingInstr(MI); - bitcastSrc(MI, CastTy, 2); - bitcastSrc(MI, CastTy, 3); - bitcastDst(MI, CastTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_AND: - case TargetOpcode::G_OR: - case TargetOpcode::G_XOR: { - Observer.changingInstr(MI); - bitcastSrc(MI, CastTy, 1); - bitcastSrc(MI, CastTy, 2); - bitcastDst(MI, CastTy, 0); - Observer.changedInstr(MI); - return Legalized; - } +LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) { + switch (MI.getOpcode()) { + case TargetOpcode::G_LOAD: { + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + bitcastDst(MI, CastTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_STORE: { + if (TypeIdx != 0) + return UnableToLegalize; + + Observer.changingInstr(MI); + bitcastSrc(MI, CastTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_SELECT: { + if (TypeIdx != 0) + return UnableToLegalize; + + if (MRI.getType(MI.getOperand(1).getReg()).isVector()) { + LLVM_DEBUG( + dbgs() << "bitcast action not implemented for vector 
select\n"); + return UnableToLegalize; + } + + Observer.changingInstr(MI); + bitcastSrc(MI, CastTy, 2); + bitcastSrc(MI, CastTy, 3); + bitcastDst(MI, CastTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_AND: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: { + Observer.changingInstr(MI); + bitcastSrc(MI, CastTy, 1); + bitcastSrc(MI, CastTy, 2); + bitcastDst(MI, CastTy, 0); + Observer.changedInstr(MI); + return Legalized; + } case TargetOpcode::G_EXTRACT_VECTOR_ELT: return bitcastExtractVectorElt(MI, TypeIdx, CastTy); case TargetOpcode::G_INSERT_VECTOR_ELT: return bitcastInsertVectorElt(MI, TypeIdx, CastTy); - default: - return UnableToLegalize; - } -} - + default: + return UnableToLegalize; + } +} + // Legalize an instruction by changing the opcode in place. void LegalizerHelper::changeOpcode(MachineInstr &MI, unsigned NewOpcode) { Observer.changingInstr(MI); @@ -2849,257 +2849,257 @@ void LegalizerHelper::changeOpcode(MachineInstr &MI, unsigned NewOpcode) { Observer.changedInstr(MI); } -LegalizerHelper::LegalizeResult +LegalizerHelper::LegalizeResult LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) { - using namespace TargetOpcode; - - switch(MI.getOpcode()) { - default: - return UnableToLegalize; - case TargetOpcode::G_BITCAST: - return lowerBitcast(MI); - case TargetOpcode::G_SREM: - case TargetOpcode::G_UREM: { + using namespace TargetOpcode; + + switch(MI.getOpcode()) { + default: + return UnableToLegalize; + case TargetOpcode::G_BITCAST: + return lowerBitcast(MI); + case TargetOpcode::G_SREM: + case TargetOpcode::G_UREM: { LLT Ty = MRI.getType(MI.getOperand(0).getReg()); - auto Quot = - MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, {Ty}, - {MI.getOperand(1), MI.getOperand(2)}); - - auto Prod = MIRBuilder.buildMul(Ty, Quot, MI.getOperand(2)); - MIRBuilder.buildSub(MI.getOperand(0), MI.getOperand(1), Prod); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_SADDO: - case TargetOpcode::G_SSUBO: - return lowerSADDO_SSUBO(MI); + auto Quot = + MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, {Ty}, + {MI.getOperand(1), MI.getOperand(2)}); + + auto Prod = MIRBuilder.buildMul(Ty, Quot, MI.getOperand(2)); + MIRBuilder.buildSub(MI.getOperand(0), MI.getOperand(1), Prod); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_SADDO: + case TargetOpcode::G_SSUBO: + return lowerSADDO_SSUBO(MI); case TargetOpcode::G_UMULH: case TargetOpcode::G_SMULH: return lowerSMULH_UMULH(MI); - case TargetOpcode::G_SMULO: - case TargetOpcode::G_UMULO: { - // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the - // result. - Register Res = MI.getOperand(0).getReg(); - Register Overflow = MI.getOperand(1).getReg(); - Register LHS = MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); + case TargetOpcode::G_SMULO: + case TargetOpcode::G_UMULO: { + // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the + // result. + Register Res = MI.getOperand(0).getReg(); + Register Overflow = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); LLT Ty = MRI.getType(Res); - - unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO - ? 
TargetOpcode::G_SMULH - : TargetOpcode::G_UMULH; - - Observer.changingInstr(MI); - const auto &TII = MIRBuilder.getTII(); - MI.setDesc(TII.get(TargetOpcode::G_MUL)); - MI.RemoveOperand(1); - Observer.changedInstr(MI); - - auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS}); - auto Zero = MIRBuilder.buildConstant(Ty, 0); - - // Move insert point forward so we can use the Res register if needed. - MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); - - // For *signed* multiply, overflow is detected by checking: - // (hi != (lo >> bitwidth-1)) - if (Opcode == TargetOpcode::G_SMULH) { - auto ShiftAmt = MIRBuilder.buildConstant(Ty, Ty.getSizeInBits() - 1); - auto Shifted = MIRBuilder.buildAShr(Ty, Res, ShiftAmt); - MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted); - } else { - MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero); - } - return Legalized; - } - case TargetOpcode::G_FNEG: { + + unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO + ? TargetOpcode::G_SMULH + : TargetOpcode::G_UMULH; + + Observer.changingInstr(MI); + const auto &TII = MIRBuilder.getTII(); + MI.setDesc(TII.get(TargetOpcode::G_MUL)); + MI.RemoveOperand(1); + Observer.changedInstr(MI); + + auto HiPart = MIRBuilder.buildInstr(Opcode, {Ty}, {LHS, RHS}); + auto Zero = MIRBuilder.buildConstant(Ty, 0); + + // Move insert point forward so we can use the Res register if needed. + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + + // For *signed* multiply, overflow is detected by checking: + // (hi != (lo >> bitwidth-1)) + if (Opcode == TargetOpcode::G_SMULH) { + auto ShiftAmt = MIRBuilder.buildConstant(Ty, Ty.getSizeInBits() - 1); + auto Shifted = MIRBuilder.buildAShr(Ty, Res, ShiftAmt); + MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted); + } else { + MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero); + } + return Legalized; + } + case TargetOpcode::G_FNEG: { Register Res = MI.getOperand(0).getReg(); LLT Ty = MRI.getType(Res); - // TODO: Handle vector types once we are able to - // represent them. - if (Ty.isVector()) - return UnableToLegalize; + // TODO: Handle vector types once we are able to + // represent them. + if (Ty.isVector()) + return UnableToLegalize; auto SignMask = MIRBuilder.buildConstant(Ty, APInt::getSignMask(Ty.getSizeInBits())); - Register SubByReg = MI.getOperand(1).getReg(); + Register SubByReg = MI.getOperand(1).getReg(); MIRBuilder.buildXor(Res, SubByReg, SignMask); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_FSUB: { + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_FSUB: { Register Res = MI.getOperand(0).getReg(); LLT Ty = MRI.getType(Res); - // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)). - // First, check if G_FNEG is marked as Lower. If so, we may - // end up with an infinite loop as G_FSUB is used to legalize G_FNEG. - if (LI.getAction({G_FNEG, {Ty}}).Action == Lower) - return UnableToLegalize; - Register LHS = MI.getOperand(1).getReg(); - Register RHS = MI.getOperand(2).getReg(); - Register Neg = MRI.createGenericVirtualRegister(Ty); - MIRBuilder.buildFNeg(Neg, RHS); - MIRBuilder.buildFAdd(Res, LHS, Neg, MI.getFlags()); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_FMAD: - return lowerFMad(MI); - case TargetOpcode::G_FFLOOR: - return lowerFFloor(MI); - case TargetOpcode::G_INTRINSIC_ROUND: - return lowerIntrinsicRound(MI); + // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)). 
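Editor's note: a compact illustration of the two rewrites in this case, G_FNEG as an XOR with the sign mask (built just above) and G_FSUB as a G_FADD of the negated operand (as this comment describes). The helper name and sample values are hypothetical; IEEE-754 single precision is assumed.

// Illustrative only: sign-bit XOR negation, and fsub == fadd(fneg).
#include <cassert>
#include <cstdint>
#include <cstring>

static float fnegViaXor(float X) {
  uint32_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  Bits ^= 0x80000000u; // flip only the IEEE-754 sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));
  return X;
}

int main() {
  const float Vals[] = {0.0f, -1.5f, 3.25f, 1e-7f};
  for (float A : Vals)
    for (float B : Vals)
      assert(A - B == A + fnegViaXor(B)); // the G_FSUB -> G_FADD lowering
  return 0;
}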
+ // First, check if G_FNEG is marked as Lower. If so, we may + // end up with an infinite loop as G_FSUB is used to legalize G_FNEG. + if (LI.getAction({G_FNEG, {Ty}}).Action == Lower) + return UnableToLegalize; + Register LHS = MI.getOperand(1).getReg(); + Register RHS = MI.getOperand(2).getReg(); + Register Neg = MRI.createGenericVirtualRegister(Ty); + MIRBuilder.buildFNeg(Neg, RHS); + MIRBuilder.buildFAdd(Res, LHS, Neg, MI.getFlags()); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_FMAD: + return lowerFMad(MI); + case TargetOpcode::G_FFLOOR: + return lowerFFloor(MI); + case TargetOpcode::G_INTRINSIC_ROUND: + return lowerIntrinsicRound(MI); case TargetOpcode::G_INTRINSIC_ROUNDEVEN: { // Since round even is the assumed rounding mode for unconstrained FP // operations, rint and roundeven are the same operation. changeOpcode(MI, TargetOpcode::G_FRINT); return Legalized; } - case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: { - Register OldValRes = MI.getOperand(0).getReg(); - Register SuccessRes = MI.getOperand(1).getReg(); - Register Addr = MI.getOperand(2).getReg(); - Register CmpVal = MI.getOperand(3).getReg(); - Register NewVal = MI.getOperand(4).getReg(); - MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal, - **MI.memoperands_begin()); - MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_LOAD: - case TargetOpcode::G_SEXTLOAD: + case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: { + Register OldValRes = MI.getOperand(0).getReg(); + Register SuccessRes = MI.getOperand(1).getReg(); + Register Addr = MI.getOperand(2).getReg(); + Register CmpVal = MI.getOperand(3).getReg(); + Register NewVal = MI.getOperand(4).getReg(); + MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal, + **MI.memoperands_begin()); + MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_LOAD: + case TargetOpcode::G_SEXTLOAD: case TargetOpcode::G_ZEXTLOAD: return lowerLoad(MI); case TargetOpcode::G_STORE: return lowerStore(MI); - case TargetOpcode::G_CTLZ_ZERO_UNDEF: - case TargetOpcode::G_CTTZ_ZERO_UNDEF: - case TargetOpcode::G_CTLZ: - case TargetOpcode::G_CTTZ: - case TargetOpcode::G_CTPOP: + case TargetOpcode::G_CTLZ_ZERO_UNDEF: + case TargetOpcode::G_CTTZ_ZERO_UNDEF: + case TargetOpcode::G_CTLZ: + case TargetOpcode::G_CTTZ: + case TargetOpcode::G_CTPOP: return lowerBitCount(MI); - case G_UADDO: { - Register Res = MI.getOperand(0).getReg(); - Register CarryOut = MI.getOperand(1).getReg(); - Register LHS = MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); - - MIRBuilder.buildAdd(Res, LHS, RHS); - MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS); - - MI.eraseFromParent(); - return Legalized; - } - case G_UADDE: { - Register Res = MI.getOperand(0).getReg(); - Register CarryOut = MI.getOperand(1).getReg(); - Register LHS = MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); - Register CarryIn = MI.getOperand(4).getReg(); - LLT Ty = MRI.getType(Res); - - auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS); - auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn); - MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn); - MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS); - - MI.eraseFromParent(); - return Legalized; - } - case G_USUBO: { - Register Res = MI.getOperand(0).getReg(); - Register BorrowOut = MI.getOperand(1).getReg(); - Register LHS = 
MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); - - MIRBuilder.buildSub(Res, LHS, RHS); - MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS); - - MI.eraseFromParent(); - return Legalized; - } - case G_USUBE: { - Register Res = MI.getOperand(0).getReg(); - Register BorrowOut = MI.getOperand(1).getReg(); - Register LHS = MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); - Register BorrowIn = MI.getOperand(4).getReg(); - const LLT CondTy = MRI.getType(BorrowOut); - const LLT Ty = MRI.getType(Res); - - auto TmpRes = MIRBuilder.buildSub(Ty, LHS, RHS); - auto ZExtBorrowIn = MIRBuilder.buildZExt(Ty, BorrowIn); - MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn); - - auto LHS_EQ_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, LHS, RHS); - auto LHS_ULT_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CondTy, LHS, RHS); - MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS); - - MI.eraseFromParent(); - return Legalized; - } - case G_UITOFP: + case G_UADDO: { + Register Res = MI.getOperand(0).getReg(); + Register CarryOut = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + + MIRBuilder.buildAdd(Res, LHS, RHS); + MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS); + + MI.eraseFromParent(); + return Legalized; + } + case G_UADDE: { + Register Res = MI.getOperand(0).getReg(); + Register CarryOut = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + Register CarryIn = MI.getOperand(4).getReg(); + LLT Ty = MRI.getType(Res); + + auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS); + auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn); + MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn); + MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS); + + MI.eraseFromParent(); + return Legalized; + } + case G_USUBO: { + Register Res = MI.getOperand(0).getReg(); + Register BorrowOut = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + + MIRBuilder.buildSub(Res, LHS, RHS); + MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS); + + MI.eraseFromParent(); + return Legalized; + } + case G_USUBE: { + Register Res = MI.getOperand(0).getReg(); + Register BorrowOut = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + Register BorrowIn = MI.getOperand(4).getReg(); + const LLT CondTy = MRI.getType(BorrowOut); + const LLT Ty = MRI.getType(Res); + + auto TmpRes = MIRBuilder.buildSub(Ty, LHS, RHS); + auto ZExtBorrowIn = MIRBuilder.buildZExt(Ty, BorrowIn); + MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn); + + auto LHS_EQ_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, LHS, RHS); + auto LHS_ULT_RHS = MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CondTy, LHS, RHS); + MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS); + + MI.eraseFromParent(); + return Legalized; + } + case G_UITOFP: return lowerUITOFP(MI); - case G_SITOFP: + case G_SITOFP: return lowerSITOFP(MI); - case G_FPTOUI: + case G_FPTOUI: return lowerFPTOUI(MI); - case G_FPTOSI: - return lowerFPTOSI(MI); - case G_FPTRUNC: + case G_FPTOSI: + return lowerFPTOSI(MI); + case G_FPTRUNC: return lowerFPTRUNC(MI); case G_FPOWI: return lowerFPOWI(MI); - case G_SMIN: - case G_SMAX: - case G_UMIN: - case G_UMAX: + case G_SMIN: + case G_SMAX: + case G_UMIN: + case G_UMAX: return lowerMinMax(MI); - case G_FCOPYSIGN: + case 
G_FCOPYSIGN: return lowerFCopySign(MI); - case G_FMINNUM: - case G_FMAXNUM: - return lowerFMinNumMaxNum(MI); - case G_MERGE_VALUES: - return lowerMergeValues(MI); - case G_UNMERGE_VALUES: - return lowerUnmergeValues(MI); - case TargetOpcode::G_SEXT_INREG: { - assert(MI.getOperand(2).isImm() && "Expected immediate"); - int64_t SizeInBits = MI.getOperand(2).getImm(); - - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - Register TmpRes = MRI.createGenericVirtualRegister(DstTy); - - auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits); - MIRBuilder.buildShl(TmpRes, SrcReg, MIBSz->getOperand(0)); - MIRBuilder.buildAShr(DstReg, TmpRes, MIBSz->getOperand(0)); - MI.eraseFromParent(); - return Legalized; - } + case G_FMINNUM: + case G_FMAXNUM: + return lowerFMinNumMaxNum(MI); + case G_MERGE_VALUES: + return lowerMergeValues(MI); + case G_UNMERGE_VALUES: + return lowerUnmergeValues(MI); + case TargetOpcode::G_SEXT_INREG: { + assert(MI.getOperand(2).isImm() && "Expected immediate"); + int64_t SizeInBits = MI.getOperand(2).getImm(); + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + Register TmpRes = MRI.createGenericVirtualRegister(DstTy); + + auto MIBSz = MIRBuilder.buildConstant(DstTy, DstTy.getScalarSizeInBits() - SizeInBits); + MIRBuilder.buildShl(TmpRes, SrcReg, MIBSz->getOperand(0)); + MIRBuilder.buildAShr(DstReg, TmpRes, MIBSz->getOperand(0)); + MI.eraseFromParent(); + return Legalized; + } case G_EXTRACT_VECTOR_ELT: case G_INSERT_VECTOR_ELT: return lowerExtractInsertVectorElt(MI); - case G_SHUFFLE_VECTOR: - return lowerShuffleVector(MI); - case G_DYN_STACKALLOC: - return lowerDynStackAlloc(MI); - case G_EXTRACT: - return lowerExtract(MI); - case G_INSERT: - return lowerInsert(MI); - case G_BSWAP: - return lowerBswap(MI); - case G_BITREVERSE: - return lowerBitreverse(MI); - case G_READ_REGISTER: - case G_WRITE_REGISTER: - return lowerReadWriteRegister(MI); + case G_SHUFFLE_VECTOR: + return lowerShuffleVector(MI); + case G_DYN_STACKALLOC: + return lowerDynStackAlloc(MI); + case G_EXTRACT: + return lowerExtract(MI); + case G_INSERT: + return lowerInsert(MI); + case G_BSWAP: + return lowerBswap(MI); + case G_BITREVERSE: + return lowerBitreverse(MI); + case G_READ_REGISTER: + case G_WRITE_REGISTER: + return lowerReadWriteRegister(MI); case G_UADDSAT: case G_USUBSAT: { // Try to make a reasonable guess about which lowering strategy to use. 
The @@ -3109,7 +3109,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) { if (LI.isLegalOrCustom({G_UMIN, Ty})) return lowerAddSubSatToMinMax(MI); return lowerAddSubSatToAddoSubo(MI); - } + } case G_SADDSAT: case G_SSUBSAT: { LLT Ty = MRI.getType(MI.getOperand(0).getReg()); @@ -3143,8 +3143,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) { case G_SELECT: return lowerSelect(MI); } -} - +} + Align LegalizerHelper::getStackTemporaryAlignment(LLT Ty, Align MinAlign) const { // FIXME: We're missing a way to go back from LLT to llvm::Type to query the @@ -3205,411 +3205,411 @@ Register LegalizerHelper::getVectorElementPointer(Register VecPtr, LLT VecTy, return MIRBuilder.buildPtrAdd(PtrTy, VecPtr, Mul).getReg(0); } -LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef( - MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); +LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef( + MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); LLT LCMTy = getLCMType(DstTy, NarrowTy); - + unsigned NumParts = LCMTy.getSizeInBits() / NarrowTy.getSizeInBits(); - + auto NewUndef = MIRBuilder.buildUndef(NarrowTy); SmallVector<Register, 8> Parts(NumParts, NewUndef.getReg(0)); - + buildWidenedRemergeToDst(DstReg, LCMTy, Parts); - MI.eraseFromParent(); - return Legalized; -} - -// Handle splitting vector operations which need to have the same number of -// elements in each type index, but each type index may have a different element -// type. -// -// e.g. <4 x s64> = G_SHL <4 x s64>, <4 x s32> -> -// <2 x s64> = G_SHL <2 x s64>, <2 x s32> -// <2 x s64> = G_SHL <2 x s64>, <2 x s32> -// -// Also handles some irregular breakdown cases, e.g. -// e.g. <3 x s64> = G_SHL <3 x s64>, <3 x s32> -> -// <2 x s64> = G_SHL <2 x s64>, <2 x s32> -// s64 = G_SHL s64, s32 -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorMultiEltType( - MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) { - if (TypeIdx != 0) - return UnableToLegalize; - - const LLT NarrowTy0 = NarrowTyArg; - const unsigned NewNumElts = - NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1; - - const Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT LeftoverTy0; - - // All of the operands need to have the same number of elements, so if we can - // determine a type breakdown for the result type, we can for all of the - // source types. - int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first; - if (NumParts < 0) - return UnableToLegalize; - - SmallVector<MachineInstrBuilder, 4> NewInsts; - - SmallVector<Register, 4> DstRegs, LeftoverDstRegs; - SmallVector<Register, 4> PartRegs, LeftoverRegs; - - for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { - Register SrcReg = MI.getOperand(I).getReg(); - LLT SrcTyI = MRI.getType(SrcReg); - LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType()); - LLT LeftoverTyI; - - // Split this operand into the requested typed registers, and any leftover - // required to reproduce the original type. - if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs, - LeftoverRegs)) - return UnableToLegalize; - - if (I == 1) { - // For the first operand, create an instruction for each part and setup - // the result. 
- for (Register PartReg : PartRegs) { - Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0); - NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) - .addDef(PartDstReg) - .addUse(PartReg)); - DstRegs.push_back(PartDstReg); - } - - for (Register LeftoverReg : LeftoverRegs) { - Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0); - NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) - .addDef(PartDstReg) - .addUse(LeftoverReg)); - LeftoverDstRegs.push_back(PartDstReg); - } - } else { - assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size()); - - // Add the newly created operand splits to the existing instructions. The - // odd-sized pieces are ordered after the requested NarrowTyArg sized - // pieces. - unsigned InstCount = 0; - for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J) - NewInsts[InstCount++].addUse(PartRegs[J]); - for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J) - NewInsts[InstCount++].addUse(LeftoverRegs[J]); - } - - PartRegs.clear(); - LeftoverRegs.clear(); - } - - // Insert the newly built operations and rebuild the result register. - for (auto &MIB : NewInsts) - MIRBuilder.insertInstr(MIB); - - insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 0) - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(SrcReg); - - LLT NarrowTy0 = NarrowTy; - LLT NarrowTy1; - unsigned NumParts; - - if (NarrowTy.isVector()) { - // Uneven breakdown not handled. - NumParts = DstTy.getNumElements() / NarrowTy.getNumElements(); - if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements()) - return UnableToLegalize; - + MI.eraseFromParent(); + return Legalized; +} + +// Handle splitting vector operations which need to have the same number of +// elements in each type index, but each type index may have a different element +// type. +// +// e.g. <4 x s64> = G_SHL <4 x s64>, <4 x s32> -> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// +// Also handles some irregular breakdown cases, e.g. +// e.g. <3 x s64> = G_SHL <3 x s64>, <3 x s32> -> +// <2 x s64> = G_SHL <2 x s64>, <2 x s32> +// s64 = G_SHL s64, s32 +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorMultiEltType( + MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) { + if (TypeIdx != 0) + return UnableToLegalize; + + const LLT NarrowTy0 = NarrowTyArg; + const unsigned NewNumElts = + NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1; + + const Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT LeftoverTy0; + + // All of the operands need to have the same number of elements, so if we can + // determine a type breakdown for the result type, we can for all of the + // source types. 
+ int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first; + if (NumParts < 0) + return UnableToLegalize; + + SmallVector<MachineInstrBuilder, 4> NewInsts; + + SmallVector<Register, 4> DstRegs, LeftoverDstRegs; + SmallVector<Register, 4> PartRegs, LeftoverRegs; + + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + Register SrcReg = MI.getOperand(I).getReg(); + LLT SrcTyI = MRI.getType(SrcReg); + LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType()); + LLT LeftoverTyI; + + // Split this operand into the requested typed registers, and any leftover + // required to reproduce the original type. + if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs, + LeftoverRegs)) + return UnableToLegalize; + + if (I == 1) { + // For the first operand, create an instruction for each part and setup + // the result. + for (Register PartReg : PartRegs) { + Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0); + NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) + .addDef(PartDstReg) + .addUse(PartReg)); + DstRegs.push_back(PartDstReg); + } + + for (Register LeftoverReg : LeftoverRegs) { + Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0); + NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) + .addDef(PartDstReg) + .addUse(LeftoverReg)); + LeftoverDstRegs.push_back(PartDstReg); + } + } else { + assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size()); + + // Add the newly created operand splits to the existing instructions. The + // odd-sized pieces are ordered after the requested NarrowTyArg sized + // pieces. + unsigned InstCount = 0; + for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J) + NewInsts[InstCount++].addUse(PartRegs[J]); + for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J) + NewInsts[InstCount++].addUse(LeftoverRegs[J]); + } + + PartRegs.clear(); + LeftoverRegs.clear(); + } + + // Insert the newly built operations and rebuild the result register. + for (auto &MIB : NewInsts) + MIRBuilder.insertInstr(MIB); + + insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + + LLT NarrowTy0 = NarrowTy; + LLT NarrowTy1; + unsigned NumParts; + + if (NarrowTy.isVector()) { + // Uneven breakdown not handled. 
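Editor's note: a toy model of the divisibility test right below; the helper numPartsOrFail and the element counts are hypothetical, but the arithmetic matches the check in the patch.

// Illustrative: the even-breakdown test used below, with made-up counts.
#include <cassert>

static int numPartsOrFail(int DstElts, int NarrowElts) {
  int NumParts = DstElts / NarrowElts;
  if (NumParts * NarrowElts != DstElts)
    return -1; // would be UnableToLegalize
  return NumParts;
}

int main() {
  assert(numPartsOrFail(4, 2) == 2);  // <4 x sN> -> two <2 x sN> pieces
  assert(numPartsOrFail(3, 2) == -1); // <3 x sN> has no even <2 x sN> split
  return 0;
}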
+ NumParts = DstTy.getNumElements() / NarrowTy.getNumElements(); + if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements()) + return UnableToLegalize; + NarrowTy1 = LLT::vector(NarrowTy.getNumElements(), SrcTy.getElementType()); - } else { - NumParts = DstTy.getNumElements(); - NarrowTy1 = SrcTy.getElementType(); - } - - SmallVector<Register, 4> SrcRegs, DstRegs; - extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs); - - for (unsigned I = 0; I < NumParts; ++I) { - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); - MachineInstr *NewInst = - MIRBuilder.buildInstr(MI.getOpcode(), {DstReg}, {SrcRegs[I]}); - - NewInst->setFlags(MI.getFlags()); - DstRegs.push_back(DstReg); - } - - if (NarrowTy.isVector()) - MIRBuilder.buildConcatVectors(DstReg, DstRegs); - else - MIRBuilder.buildBuildVector(DstReg, DstRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - Register Src0Reg = MI.getOperand(2).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(Src0Reg); - - unsigned NumParts; - LLT NarrowTy0, NarrowTy1; - - if (TypeIdx == 0) { - unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; - unsigned OldElts = DstTy.getNumElements(); - - NarrowTy0 = NarrowTy; - NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements(); - NarrowTy1 = NarrowTy.isVector() ? - LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) : - SrcTy.getElementType(); - - } else { - unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; - unsigned OldElts = SrcTy.getNumElements(); - - NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : - NarrowTy.getNumElements(); - NarrowTy0 = LLT::vector(NarrowTy.getNumElements(), - DstTy.getScalarSizeInBits()); - NarrowTy1 = NarrowTy; - } - - // FIXME: Don't know how to handle the situation where the small vectors - // aren't all the same size yet. 
- if (NarrowTy1.isVector() && - NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements()) - return UnableToLegalize; - - CmpInst::Predicate Pred - = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); - - SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; - extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs); - extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs); - - for (unsigned I = 0; I < NumParts; ++I) { - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); - DstRegs.push_back(DstReg); - - if (MI.getOpcode() == TargetOpcode::G_ICMP) - MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); - else { - MachineInstr *NewCmp - = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); - NewCmp->setFlags(MI.getFlags()); - } - } - - if (NarrowTy1.isVector()) - MIRBuilder.buildConcatVectors(DstReg, DstRegs); - else - MIRBuilder.buildBuildVector(DstReg, DstRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - Register CondReg = MI.getOperand(1).getReg(); - - unsigned NumParts = 0; - LLT NarrowTy0, NarrowTy1; - - LLT DstTy = MRI.getType(DstReg); - LLT CondTy = MRI.getType(CondReg); - unsigned Size = DstTy.getSizeInBits(); - - assert(TypeIdx == 0 || CondTy.isVector()); - - if (TypeIdx == 0) { - NarrowTy0 = NarrowTy; - NarrowTy1 = CondTy; - - unsigned NarrowSize = NarrowTy0.getSizeInBits(); - // FIXME: Don't know how to handle the situation where the small vectors - // aren't all the same size yet. - if (Size % NarrowSize != 0) - return UnableToLegalize; - - NumParts = Size / NarrowSize; - - // Need to break down the condition type - if (CondTy.isVector()) { - if (CondTy.getNumElements() == NumParts) - NarrowTy1 = CondTy.getElementType(); - else - NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts, - CondTy.getScalarSizeInBits()); - } - } else { - NumParts = CondTy.getNumElements(); - if (NarrowTy.isVector()) { - // TODO: Handle uneven breakdown. - if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements()) - return UnableToLegalize; - - return UnableToLegalize; - } else { - NarrowTy0 = DstTy.getElementType(); - NarrowTy1 = NarrowTy; - } - } - - SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; - if (CondTy.isVector()) - extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); - - extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs); - extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); - - for (unsigned i = 0; i < NumParts; ++i) { - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); - MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg, - Src1Regs[i], Src2Regs[i]); - DstRegs.push_back(DstReg); - } - - if (NarrowTy0.isVector()) - MIRBuilder.buildConcatVectors(DstReg, DstRegs); - else - MIRBuilder.buildBuildVector(DstReg, DstRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - const Register DstReg = MI.getOperand(0).getReg(); - LLT PhiTy = MRI.getType(DstReg); - LLT LeftoverTy; - - // All of the operands need to have the same number of elements, so if we can - // determine a type breakdown for the result type, we can for all of the - // source types. 
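// (Editorial sketch, not upstream text: a
//   %phi:_(<3 x s32>) = G_PHI %a(<3 x s32>), %bb.1, %b(<3 x s32>), %bb.2
// split by NarrowTy = <2 x s32> breaks down as NumParts = 1 piece of
// <2 x s32> plus NumLeftover = 1 leftover s32; one narrow G_PHI is
// built per piece, and each predecessor block gets matching extracts
// inserted before its terminator.)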
- int NumParts, NumLeftover; - std::tie(NumParts, NumLeftover) - = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy); - if (NumParts < 0) - return UnableToLegalize; - - SmallVector<Register, 4> DstRegs, LeftoverDstRegs; - SmallVector<MachineInstrBuilder, 4> NewInsts; - - const int TotalNumParts = NumParts + NumLeftover; - - // Insert the new phis in the result block first. - for (int I = 0; I != TotalNumParts; ++I) { - LLT Ty = I < NumParts ? NarrowTy : LeftoverTy; - Register PartDstReg = MRI.createGenericVirtualRegister(Ty); - NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI) - .addDef(PartDstReg)); - if (I < NumParts) - DstRegs.push_back(PartDstReg); - else - LeftoverDstRegs.push_back(PartDstReg); - } - - MachineBasicBlock *MBB = MI.getParent(); - MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI()); - insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs); - - SmallVector<Register, 4> PartRegs, LeftoverRegs; - - // Insert code to extract the incoming values in each predecessor block. - for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { - PartRegs.clear(); - LeftoverRegs.clear(); - - Register SrcReg = MI.getOperand(I).getReg(); - MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); - MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); - - LLT Unused; - if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs, - LeftoverRegs)) - return UnableToLegalize; - - // Add the newly created operand splits to the existing instructions. The - // odd-sized pieces are ordered after the requested NarrowTyArg sized - // pieces. - for (int J = 0; J != TotalNumParts; ++J) { - MachineInstrBuilder MIB = NewInsts[J]; - MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]); - MIB.addMBB(&OpMBB); - } - } - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI, - unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - const int NumDst = MI.getNumOperands() - 1; - const Register SrcReg = MI.getOperand(NumDst).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - - LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); - - // TODO: Create sequence of extracts. - if (DstTy == NarrowTy) - return UnableToLegalize; - - LLT GCDTy = getGCDType(SrcTy, NarrowTy); - if (DstTy == GCDTy) { - // This would just be a copy of the same unmerge. - // TODO: Create extracts, pad with undef and create intermediate merges. 
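// (Editorial sketch: this case arises e.g. for
//   %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32) = G_UNMERGE_VALUES %v(<4 x s32>)
// with NarrowTy = <3 x s32>: getGCDType(<4 x s32>, <3 x s32>) is s32,
// which already equals DstTy, so re-unmerging to GCDTy would just
// recreate the original instruction.)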
- return UnableToLegalize; - } - - auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); - const int NumUnmerge = Unmerge->getNumOperands() - 1; - const int PartsPerUnmerge = NumDst / NumUnmerge; - - for (int I = 0; I != NumUnmerge; ++I) { - auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); - - for (int J = 0; J != PartsPerUnmerge; ++J) - MIB.addDef(MI.getOperand(I * PartsPerUnmerge + J).getReg()); - MIB.addUse(Unmerge.getReg(I)); - } - - MI.eraseFromParent(); - return Legalized; -} - + } else { + NumParts = DstTy.getNumElements(); + NarrowTy1 = SrcTy.getElementType(); + } + + SmallVector<Register, 4> SrcRegs, DstRegs; + extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs); + + for (unsigned I = 0; I < NumParts; ++I) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + MachineInstr *NewInst = + MIRBuilder.buildInstr(MI.getOpcode(), {DstReg}, {SrcRegs[I]}); + + NewInst->setFlags(MI.getFlags()); + DstRegs.push_back(DstReg); + } + + if (NarrowTy.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register Src0Reg = MI.getOperand(2).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(Src0Reg); + + unsigned NumParts; + LLT NarrowTy0, NarrowTy1; + + if (TypeIdx == 0) { + unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; + unsigned OldElts = DstTy.getNumElements(); + + NarrowTy0 = NarrowTy; + NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements(); + NarrowTy1 = NarrowTy.isVector() ? + LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) : + SrcTy.getElementType(); + + } else { + unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; + unsigned OldElts = SrcTy.getNumElements(); + + NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : + NarrowTy.getNumElements(); + NarrowTy0 = LLT::vector(NarrowTy.getNumElements(), + DstTy.getScalarSizeInBits()); + NarrowTy1 = NarrowTy; + } + + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. 
+ if (NarrowTy1.isVector() && + NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements()) + return UnableToLegalize; + + CmpInst::Predicate Pred + = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); + + SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; + extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs); + extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs); + + for (unsigned I = 0; I < NumParts; ++I) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + DstRegs.push_back(DstReg); + + if (MI.getOpcode() == TargetOpcode::G_ICMP) + MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); + else { + MachineInstr *NewCmp + = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); + NewCmp->setFlags(MI.getFlags()); + } + } + + if (NarrowTy1.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register CondReg = MI.getOperand(1).getReg(); + + unsigned NumParts = 0; + LLT NarrowTy0, NarrowTy1; + + LLT DstTy = MRI.getType(DstReg); + LLT CondTy = MRI.getType(CondReg); + unsigned Size = DstTy.getSizeInBits(); + + assert(TypeIdx == 0 || CondTy.isVector()); + + if (TypeIdx == 0) { + NarrowTy0 = NarrowTy; + NarrowTy1 = CondTy; + + unsigned NarrowSize = NarrowTy0.getSizeInBits(); + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. + if (Size % NarrowSize != 0) + return UnableToLegalize; + + NumParts = Size / NarrowSize; + + // Need to break down the condition type + if (CondTy.isVector()) { + if (CondTy.getNumElements() == NumParts) + NarrowTy1 = CondTy.getElementType(); + else + NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts, + CondTy.getScalarSizeInBits()); + } + } else { + NumParts = CondTy.getNumElements(); + if (NarrowTy.isVector()) { + // TODO: Handle uneven breakdown. + if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements()) + return UnableToLegalize; + + return UnableToLegalize; + } else { + NarrowTy0 = DstTy.getElementType(); + NarrowTy1 = NarrowTy; + } + } + + SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; + if (CondTy.isVector()) + extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); + + extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs); + extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); + + for (unsigned i = 0; i < NumParts; ++i) { + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg, + Src1Regs[i], Src2Regs[i]); + DstRegs.push_back(DstReg); + } + + if (NarrowTy0.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + const Register DstReg = MI.getOperand(0).getReg(); + LLT PhiTy = MRI.getType(DstReg); + LLT LeftoverTy; + + // All of the operands need to have the same number of elements, so if we can + // determine a type breakdown for the result type, we can for all of the + // source types. 
+ int NumParts, NumLeftover; + std::tie(NumParts, NumLeftover) + = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy); + if (NumParts < 0) + return UnableToLegalize; + + SmallVector<Register, 4> DstRegs, LeftoverDstRegs; + SmallVector<MachineInstrBuilder, 4> NewInsts; + + const int TotalNumParts = NumParts + NumLeftover; + + // Insert the new phis in the result block first. + for (int I = 0; I != TotalNumParts; ++I) { + LLT Ty = I < NumParts ? NarrowTy : LeftoverTy; + Register PartDstReg = MRI.createGenericVirtualRegister(Ty); + NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI) + .addDef(PartDstReg)); + if (I < NumParts) + DstRegs.push_back(PartDstReg); + else + LeftoverDstRegs.push_back(PartDstReg); + } + + MachineBasicBlock *MBB = MI.getParent(); + MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI()); + insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs); + + SmallVector<Register, 4> PartRegs, LeftoverRegs; + + // Insert code to extract the incoming values in each predecessor block. + for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { + PartRegs.clear(); + LeftoverRegs.clear(); + + Register SrcReg = MI.getOperand(I).getReg(); + MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + + LLT Unused; + if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs, + LeftoverRegs)) + return UnableToLegalize; + + // Add the newly created operand splits to the existing instructions. The + // odd-sized pieces are ordered after the requested NarrowTyArg sized + // pieces. + for (int J = 0; J != TotalNumParts; ++J) { + MachineInstrBuilder MIB = NewInsts[J]; + MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]); + MIB.addMBB(&OpMBB); + } + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI, + unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + const int NumDst = MI.getNumOperands() - 1; + const Register SrcReg = MI.getOperand(NumDst).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); + + // TODO: Create sequence of extracts. + if (DstTy == NarrowTy) + return UnableToLegalize; + + LLT GCDTy = getGCDType(SrcTy, NarrowTy); + if (DstTy == GCDTy) { + // This would just be a copy of the same unmerge. + // TODO: Create extracts, pad with undef and create intermediate merges. 
+ return UnableToLegalize; + } + + auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); + const int NumUnmerge = Unmerge->getNumOperands() - 1; + const int PartsPerUnmerge = NumDst / NumUnmerge; + + for (int I = 0; I != NumUnmerge; ++I) { + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); + + for (int J = 0; J != PartsPerUnmerge; ++J) + MIB.addDef(MI.getOperand(I * PartsPerUnmerge + J).getReg()); + MIB.addUse(Unmerge.getReg(I)); + } + + MI.eraseFromParent(); + return Legalized; +} + // Handle FewerElementsVector a G_BUILD_VECTOR or G_CONCAT_VECTORS that produces // a vector // @@ -3625,30 +3625,30 @@ LegalizerHelper::fewerElementsVectorUnmergeValues(MachineInstr &MI, // %7:_(<2 x s16>) = G_IMPLICIT_DEF // %8:_(<6 x s16>) = G_CONCAT_VECTORS %5, %6, %7 // %3:_(<3 x s16>), %8:_(<3 x s16>) = G_UNMERGE_VALUES %8 -LegalizerHelper::LegalizeResult +LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); LLT GCDTy = getGCDType(getGCDType(SrcTy, NarrowTy), DstTy); - + // Break into a common type SmallVector<Register, 16> Parts; for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) extractGCDType(Parts, GCDTy, MI.getOperand(I).getReg()); - + // Build the requested new merge, padding with undef. LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, TargetOpcode::G_ANYEXT); - + // Pack into the original result register. buildWidenedRemergeToDst(DstReg, LCMTy, Parts); - + MI.eraseFromParent(); return Legalized; } - + LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI, unsigned TypeIdx, @@ -3679,11 +3679,11 @@ LegalizerHelper::fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI, MIRBuilder.buildUndef(DstReg); MI.eraseFromParent(); return Legalized; - } - + } + SmallVector<Register, 8> VecParts; LLT GCDTy = extractGCDType(VecParts, VecTy, NarrowVecTy, SrcVec); - + // Build a sequence of NarrowTy pieces in VecParts for this operand. LLT LCMTy = buildLCMMergePieces(VecTy, NarrowVecTy, GCDTy, VecParts, TargetOpcode::G_ANYEXT); @@ -3712,346 +3712,346 @@ LegalizerHelper::fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI, MI.eraseFromParent(); return Legalized; - } - + } + // With a variable index, we can't perform the operation in a smaller type, so // we're forced to expand this. // // TODO: We could emit a chain of compare/select to figure out which piece to // index. return lowerExtractInsertVectorElt(MI); -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - // FIXME: Don't know how to handle secondary types yet. - if (TypeIdx != 0) - return UnableToLegalize; - - MachineMemOperand *MMO = *MI.memoperands_begin(); - - // This implementation doesn't work for atomics. Give up instead of doing - // something invalid. - if (MMO->getOrdering() != AtomicOrdering::NotAtomic || - MMO->getFailureOrdering() != AtomicOrdering::NotAtomic) - return UnableToLegalize; - - bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD; - Register ValReg = MI.getOperand(0).getReg(); - Register AddrReg = MI.getOperand(1).getReg(); - LLT ValTy = MRI.getType(ValReg); - - // FIXME: Do we need a distinct NarrowMemory legalize action? 
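// (Editorial note: a size mismatch between the value type and the
// memory operand means this is an extending load or truncating store,
// e.g. an s32 G_LOAD backed by a 2-byte MMO; splitting it would also
// have to split the implicit extension, which is not handled here.)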
- if (ValTy.getSizeInBits() != 8 * MMO->getSize()) { - LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n"); - return UnableToLegalize; - } - - int NumParts = -1; - int NumLeftover = -1; - LLT LeftoverTy; - SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs; - if (IsLoad) { - std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy); - } else { - if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs, - NarrowLeftoverRegs)) { - NumParts = NarrowRegs.size(); - NumLeftover = NarrowLeftoverRegs.size(); - } - } - - if (NumParts == -1) - return UnableToLegalize; - +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + // FIXME: Don't know how to handle secondary types yet. + if (TypeIdx != 0) + return UnableToLegalize; + + MachineMemOperand *MMO = *MI.memoperands_begin(); + + // This implementation doesn't work for atomics. Give up instead of doing + // something invalid. + if (MMO->getOrdering() != AtomicOrdering::NotAtomic || + MMO->getFailureOrdering() != AtomicOrdering::NotAtomic) + return UnableToLegalize; + + bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD; + Register ValReg = MI.getOperand(0).getReg(); + Register AddrReg = MI.getOperand(1).getReg(); + LLT ValTy = MRI.getType(ValReg); + + // FIXME: Do we need a distinct NarrowMemory legalize action? + if (ValTy.getSizeInBits() != 8 * MMO->getSize()) { + LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n"); + return UnableToLegalize; + } + + int NumParts = -1; + int NumLeftover = -1; + LLT LeftoverTy; + SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs; + if (IsLoad) { + std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy); + } else { + if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs, + NarrowLeftoverRegs)) { + NumParts = NarrowRegs.size(); + NumLeftover = NarrowLeftoverRegs.size(); + } + } + + if (NumParts == -1) + return UnableToLegalize; + LLT PtrTy = MRI.getType(AddrReg); const LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits()); - - unsigned TotalSize = ValTy.getSizeInBits(); - - // Split the load/store into PartTy sized pieces starting at Offset. If this - // is a load, return the new registers in ValRegs. For a store, each elements - // of ValRegs should be PartTy. Returns the next offset that needs to be - // handled. - auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs, - unsigned Offset) -> unsigned { - MachineFunction &MF = MIRBuilder.getMF(); - unsigned PartSize = PartTy.getSizeInBits(); - for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize; - Offset += PartSize, ++Idx) { - unsigned ByteSize = PartSize / 8; - unsigned ByteOffset = Offset / 8; - Register NewAddrReg; - - MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset); - - MachineMemOperand *NewMMO = - MF.getMachineMemOperand(MMO, ByteOffset, ByteSize); - - if (IsLoad) { - Register Dst = MRI.createGenericVirtualRegister(PartTy); - ValRegs.push_back(Dst); - MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO); - } else { - MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO); - } - } - - return Offset; - }; - - unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0); - - // Handle the rest of the register if this isn't an even type breakdown. 
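// (Illustrative numbers, not upstream text: for an s96 value split
// with NarrowTy = s64, splitTypePieces covers bytes 0..7 and returns
// HandledOffset = 64; the s32 LeftoverTy piece is then loaded or
// stored at byte offset 8 by the call below.)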
- if (LeftoverTy.isValid()) - splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset); - - if (IsLoad) { - insertParts(ValReg, ValTy, NarrowTy, NarrowRegs, - LeftoverTy, NarrowLeftoverRegs); - } - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::reduceOperationWidth(MachineInstr &MI, unsigned int TypeIdx, - LLT NarrowTy) { - assert(TypeIdx == 0 && "only one type index expected"); - - const unsigned Opc = MI.getOpcode(); - const int NumOps = MI.getNumOperands() - 1; - const Register DstReg = MI.getOperand(0).getReg(); - const unsigned Flags = MI.getFlags(); - const unsigned NarrowSize = NarrowTy.getSizeInBits(); - const LLT NarrowScalarTy = LLT::scalar(NarrowSize); - - assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources"); - - // First of all check whether we are narrowing (changing the element type) - // or reducing the vector elements - const LLT DstTy = MRI.getType(DstReg); - const bool IsNarrow = NarrowTy.getScalarType() != DstTy.getScalarType(); - - SmallVector<Register, 8> ExtractedRegs[3]; - SmallVector<Register, 8> Parts; - - unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; - - // Break down all the sources into NarrowTy pieces we can operate on. This may - // involve creating merges to a wider type, padded with undef. - for (int I = 0; I != NumOps; ++I) { - Register SrcReg = MI.getOperand(I + 1).getReg(); - LLT SrcTy = MRI.getType(SrcReg); - - // The type to narrow SrcReg to. For narrowing, this is a smaller scalar. - // For fewerElements, this is a smaller vector with the same element type. - LLT OpNarrowTy; - if (IsNarrow) { - OpNarrowTy = NarrowScalarTy; - - // In case of narrowing, we need to cast vectors to scalars for this to - // work properly - // FIXME: Can we do without the bitcast here if we're narrowing? - if (SrcTy.isVector()) { - SrcTy = LLT::scalar(SrcTy.getSizeInBits()); - SrcReg = MIRBuilder.buildBitcast(SrcTy, SrcReg).getReg(0); - } - } else { - OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType()); - } - - LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg); - - // Build a sequence of NarrowTy pieces in ExtractedRegs for this operand. - buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy, ExtractedRegs[I], - TargetOpcode::G_ANYEXT); - } - - SmallVector<Register, 8> ResultRegs; - - // Input operands for each sub-instruction. - SmallVector<SrcOp, 4> InputRegs(NumOps, Register()); - - int NumParts = ExtractedRegs[0].size(); - const unsigned DstSize = DstTy.getSizeInBits(); - const LLT DstScalarTy = LLT::scalar(DstSize); - - // Narrowing needs to use scalar types - LLT DstLCMTy, NarrowDstTy; - if (IsNarrow) { - DstLCMTy = getLCMType(DstScalarTy, NarrowScalarTy); - NarrowDstTy = NarrowScalarTy; - } else { - DstLCMTy = getLCMType(DstTy, NarrowTy); - NarrowDstTy = NarrowTy; - } - - // We widened the source registers to satisfy merge/unmerge size - // constraints. We'll have some extra fully undef parts. - const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize; - - for (int I = 0; I != NumRealParts; ++I) { - // Emit this instruction on each of the split pieces. - for (int J = 0; J != NumOps; ++J) - InputRegs[J] = ExtractedRegs[J][I]; - - auto Inst = MIRBuilder.buildInstr(Opc, {NarrowDstTy}, InputRegs, Flags); - ResultRegs.push_back(Inst.getReg(0)); - } - - // Fill out the widened result with undef instead of creating instructions - // with undef inputs. 
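// (Editorial sketch, reusing the <3 x s16> example from the
// G_UNMERGE_VALUES comment earlier in this file: with
// DstTy = <3 x s16> and NarrowTy = <2 x s16>, the LCM type is
// <6 x s16>, so NumParts = 3 but only NumRealParts = 2 pieces carry
// defined bits; the third is padded with G_IMPLICIT_DEF below.)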
- int NumUndefParts = NumParts - NumRealParts; - if (NumUndefParts != 0) - ResultRegs.append(NumUndefParts, - MIRBuilder.buildUndef(NarrowDstTy).getReg(0)); - - // Extract the possibly padded result. Use a scratch register if we need to do - // a final bitcast, otherwise use the original result register. - Register MergeDstReg; - if (IsNarrow && DstTy.isVector()) - MergeDstReg = MRI.createGenericVirtualRegister(DstScalarTy); - else - MergeDstReg = DstReg; - - buildWidenedRemergeToDst(MergeDstReg, DstLCMTy, ResultRegs); - - // Recast to vector if we narrowed a vector - if (IsNarrow && DstTy.isVector()) - MIRBuilder.buildBitcast(DstReg, MergeDstReg); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - int64_t Imm = MI.getOperand(2).getImm(); - - LLT DstTy = MRI.getType(DstReg); - - SmallVector<Register, 8> Parts; - LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg); - LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts); - - for (Register &R : Parts) - R = MIRBuilder.buildSExtInReg(NarrowTy, R, Imm).getReg(0); - - buildWidenedRemergeToDst(DstReg, LCMTy, Parts); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - using namespace TargetOpcode; - - switch (MI.getOpcode()) { - case G_IMPLICIT_DEF: - return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy); - case G_TRUNC: - case G_AND: - case G_OR: - case G_XOR: - case G_ADD: - case G_SUB: - case G_MUL: + + unsigned TotalSize = ValTy.getSizeInBits(); + + // Split the load/store into PartTy sized pieces starting at Offset. If this + // is a load, return the new registers in ValRegs. For a store, each elements + // of ValRegs should be PartTy. Returns the next offset that needs to be + // handled. + auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs, + unsigned Offset) -> unsigned { + MachineFunction &MF = MIRBuilder.getMF(); + unsigned PartSize = PartTy.getSizeInBits(); + for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize; + Offset += PartSize, ++Idx) { + unsigned ByteSize = PartSize / 8; + unsigned ByteOffset = Offset / 8; + Register NewAddrReg; + + MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset); + + MachineMemOperand *NewMMO = + MF.getMachineMemOperand(MMO, ByteOffset, ByteSize); + + if (IsLoad) { + Register Dst = MRI.createGenericVirtualRegister(PartTy); + ValRegs.push_back(Dst); + MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO); + } else { + MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO); + } + } + + return Offset; + }; + + unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0); + + // Handle the rest of the register if this isn't an even type breakdown. 
+ if (LeftoverTy.isValid()) + splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset); + + if (IsLoad) { + insertParts(ValReg, ValTy, NarrowTy, NarrowRegs, + LeftoverTy, NarrowLeftoverRegs); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::reduceOperationWidth(MachineInstr &MI, unsigned int TypeIdx, + LLT NarrowTy) { + assert(TypeIdx == 0 && "only one type index expected"); + + const unsigned Opc = MI.getOpcode(); + const int NumOps = MI.getNumOperands() - 1; + const Register DstReg = MI.getOperand(0).getReg(); + const unsigned Flags = MI.getFlags(); + const unsigned NarrowSize = NarrowTy.getSizeInBits(); + const LLT NarrowScalarTy = LLT::scalar(NarrowSize); + + assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources"); + + // First of all check whether we are narrowing (changing the element type) + // or reducing the vector elements + const LLT DstTy = MRI.getType(DstReg); + const bool IsNarrow = NarrowTy.getScalarType() != DstTy.getScalarType(); + + SmallVector<Register, 8> ExtractedRegs[3]; + SmallVector<Register, 8> Parts; + + unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; + + // Break down all the sources into NarrowTy pieces we can operate on. This may + // involve creating merges to a wider type, padded with undef. + for (int I = 0; I != NumOps; ++I) { + Register SrcReg = MI.getOperand(I + 1).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + // The type to narrow SrcReg to. For narrowing, this is a smaller scalar. + // For fewerElements, this is a smaller vector with the same element type. + LLT OpNarrowTy; + if (IsNarrow) { + OpNarrowTy = NarrowScalarTy; + + // In case of narrowing, we need to cast vectors to scalars for this to + // work properly + // FIXME: Can we do without the bitcast here if we're narrowing? + if (SrcTy.isVector()) { + SrcTy = LLT::scalar(SrcTy.getSizeInBits()); + SrcReg = MIRBuilder.buildBitcast(SrcTy, SrcReg).getReg(0); + } + } else { + OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType()); + } + + LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg); + + // Build a sequence of NarrowTy pieces in ExtractedRegs for this operand. + buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy, ExtractedRegs[I], + TargetOpcode::G_ANYEXT); + } + + SmallVector<Register, 8> ResultRegs; + + // Input operands for each sub-instruction. + SmallVector<SrcOp, 4> InputRegs(NumOps, Register()); + + int NumParts = ExtractedRegs[0].size(); + const unsigned DstSize = DstTy.getSizeInBits(); + const LLT DstScalarTy = LLT::scalar(DstSize); + + // Narrowing needs to use scalar types + LLT DstLCMTy, NarrowDstTy; + if (IsNarrow) { + DstLCMTy = getLCMType(DstScalarTy, NarrowScalarTy); + NarrowDstTy = NarrowScalarTy; + } else { + DstLCMTy = getLCMType(DstTy, NarrowTy); + NarrowDstTy = NarrowTy; + } + + // We widened the source registers to satisfy merge/unmerge size + // constraints. We'll have some extra fully undef parts. + const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize; + + for (int I = 0; I != NumRealParts; ++I) { + // Emit this instruction on each of the split pieces. + for (int J = 0; J != NumOps; ++J) + InputRegs[J] = ExtractedRegs[J][I]; + + auto Inst = MIRBuilder.buildInstr(Opc, {NarrowDstTy}, InputRegs, Flags); + ResultRegs.push_back(Inst.getReg(0)); + } + + // Fill out the widened result with undef instead of creating instructions + // with undef inputs. 
+ int NumUndefParts = NumParts - NumRealParts; + if (NumUndefParts != 0) + ResultRegs.append(NumUndefParts, + MIRBuilder.buildUndef(NarrowDstTy).getReg(0)); + + // Extract the possibly padded result. Use a scratch register if we need to do + // a final bitcast, otherwise use the original result register. + Register MergeDstReg; + if (IsNarrow && DstTy.isVector()) + MergeDstReg = MRI.createGenericVirtualRegister(DstScalarTy); + else + MergeDstReg = DstReg; + + buildWidenedRemergeToDst(MergeDstReg, DstLCMTy, ResultRegs); + + // Recast to vector if we narrowed a vector + if (IsNarrow && DstTy.isVector()) + MIRBuilder.buildBitcast(DstReg, MergeDstReg); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + int64_t Imm = MI.getOperand(2).getImm(); + + LLT DstTy = MRI.getType(DstReg); + + SmallVector<Register, 8> Parts; + LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg); + LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts); + + for (Register &R : Parts) + R = MIRBuilder.buildSExtInReg(NarrowTy, R, Imm).getReg(0); + + buildWidenedRemergeToDst(DstReg, LCMTy, Parts); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + using namespace TargetOpcode; + + switch (MI.getOpcode()) { + case G_IMPLICIT_DEF: + return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy); + case G_TRUNC: + case G_AND: + case G_OR: + case G_XOR: + case G_ADD: + case G_SUB: + case G_MUL: case G_PTR_ADD: - case G_SMULH: - case G_UMULH: - case G_FADD: - case G_FMUL: - case G_FSUB: - case G_FNEG: - case G_FABS: - case G_FCANONICALIZE: - case G_FDIV: - case G_FREM: - case G_FMA: - case G_FMAD: - case G_FPOW: - case G_FEXP: - case G_FEXP2: - case G_FLOG: - case G_FLOG2: - case G_FLOG10: - case G_FNEARBYINT: - case G_FCEIL: - case G_FFLOOR: - case G_FRINT: - case G_INTRINSIC_ROUND: + case G_SMULH: + case G_UMULH: + case G_FADD: + case G_FMUL: + case G_FSUB: + case G_FNEG: + case G_FABS: + case G_FCANONICALIZE: + case G_FDIV: + case G_FREM: + case G_FMA: + case G_FMAD: + case G_FPOW: + case G_FEXP: + case G_FEXP2: + case G_FLOG: + case G_FLOG2: + case G_FLOG10: + case G_FNEARBYINT: + case G_FCEIL: + case G_FFLOOR: + case G_FRINT: + case G_INTRINSIC_ROUND: case G_INTRINSIC_ROUNDEVEN: - case G_INTRINSIC_TRUNC: - case G_FCOS: - case G_FSIN: - case G_FSQRT: - case G_BSWAP: - case G_BITREVERSE: - case G_SDIV: - case G_UDIV: - case G_SREM: - case G_UREM: - case G_SMIN: - case G_SMAX: - case G_UMIN: - case G_UMAX: - case G_FMINNUM: - case G_FMAXNUM: - case G_FMINNUM_IEEE: - case G_FMAXNUM_IEEE: - case G_FMINIMUM: - case G_FMAXIMUM: - case G_FSHL: - case G_FSHR: - case G_FREEZE: - case G_SADDSAT: - case G_SSUBSAT: - case G_UADDSAT: - case G_USUBSAT: - return reduceOperationWidth(MI, TypeIdx, NarrowTy); - case G_SHL: - case G_LSHR: - case G_ASHR: + case G_INTRINSIC_TRUNC: + case G_FCOS: + case G_FSIN: + case G_FSQRT: + case G_BSWAP: + case G_BITREVERSE: + case G_SDIV: + case G_UDIV: + case G_SREM: + case G_UREM: + case G_SMIN: + case G_SMAX: + case G_UMIN: + case G_UMAX: + case G_FMINNUM: + case G_FMAXNUM: + case G_FMINNUM_IEEE: + case G_FMAXNUM_IEEE: + case G_FMINIMUM: + case G_FMAXIMUM: + case G_FSHL: + case G_FSHR: + case G_FREEZE: + case G_SADDSAT: + case G_SSUBSAT: + 
case G_UADDSAT: + case G_USUBSAT: + return reduceOperationWidth(MI, TypeIdx, NarrowTy); + case G_SHL: + case G_LSHR: + case G_ASHR: case G_SSHLSAT: case G_USHLSAT: - case G_CTLZ: - case G_CTLZ_ZERO_UNDEF: - case G_CTTZ: - case G_CTTZ_ZERO_UNDEF: - case G_CTPOP: - case G_FCOPYSIGN: - return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy); - case G_ZEXT: - case G_SEXT: - case G_ANYEXT: - case G_FPEXT: - case G_FPTRUNC: - case G_SITOFP: - case G_UITOFP: - case G_FPTOSI: - case G_FPTOUI: - case G_INTTOPTR: - case G_PTRTOINT: - case G_ADDRSPACE_CAST: - return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy); - case G_ICMP: - case G_FCMP: - return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy); - case G_SELECT: - return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy); - case G_PHI: - return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy); - case G_UNMERGE_VALUES: - return fewerElementsVectorUnmergeValues(MI, TypeIdx, NarrowTy); - case G_BUILD_VECTOR: + case G_CTLZ: + case G_CTLZ_ZERO_UNDEF: + case G_CTTZ: + case G_CTTZ_ZERO_UNDEF: + case G_CTPOP: + case G_FCOPYSIGN: + return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy); + case G_ZEXT: + case G_SEXT: + case G_ANYEXT: + case G_FPEXT: + case G_FPTRUNC: + case G_SITOFP: + case G_UITOFP: + case G_FPTOSI: + case G_FPTOUI: + case G_INTTOPTR: + case G_PTRTOINT: + case G_ADDRSPACE_CAST: + return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy); + case G_ICMP: + case G_FCMP: + return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy); + case G_SELECT: + return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy); + case G_PHI: + return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy); + case G_UNMERGE_VALUES: + return fewerElementsVectorUnmergeValues(MI, TypeIdx, NarrowTy); + case G_BUILD_VECTOR: assert(TypeIdx == 0 && "not a vector type index"); return fewerElementsVectorMerge(MI, TypeIdx, NarrowTy); case G_CONCAT_VECTORS: @@ -4061,429 +4061,429 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, case G_EXTRACT_VECTOR_ELT: case G_INSERT_VECTOR_ELT: return fewerElementsVectorExtractInsertVectorElt(MI, TypeIdx, NarrowTy); - case G_LOAD: - case G_STORE: - return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); - case G_SEXT_INREG: - return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy); - default: - return UnableToLegalize; - } -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt, - const LLT HalfTy, const LLT AmtTy) { - - Register InL = MRI.createGenericVirtualRegister(HalfTy); - Register InH = MRI.createGenericVirtualRegister(HalfTy); - MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1)); - - if (Amt.isNullValue()) { - MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH}); - MI.eraseFromParent(); - return Legalized; - } - - LLT NVT = HalfTy; - unsigned NVTBits = HalfTy.getSizeInBits(); - unsigned VTBits = 2 * NVTBits; - - SrcOp Lo(Register(0)), Hi(Register(0)); - if (MI.getOpcode() == TargetOpcode::G_SHL) { - if (Amt.ugt(VTBits)) { - Lo = Hi = MIRBuilder.buildConstant(NVT, 0); - } else if (Amt.ugt(NVTBits)) { - Lo = MIRBuilder.buildConstant(NVT, 0); - Hi = MIRBuilder.buildShl(NVT, InL, - MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); - } else if (Amt == NVTBits) { - Lo = MIRBuilder.buildConstant(NVT, 0); - Hi = InL; - } else { - Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt)); - auto OrLHS = - MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt)); - auto OrRHS = MIRBuilder.buildLShr( - NVT, InL, 
MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); - Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); - } - } else if (MI.getOpcode() == TargetOpcode::G_LSHR) { - if (Amt.ugt(VTBits)) { - Lo = Hi = MIRBuilder.buildConstant(NVT, 0); - } else if (Amt.ugt(NVTBits)) { - Lo = MIRBuilder.buildLShr(NVT, InH, - MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); - Hi = MIRBuilder.buildConstant(NVT, 0); - } else if (Amt == NVTBits) { - Lo = InH; - Hi = MIRBuilder.buildConstant(NVT, 0); - } else { - auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); - - auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); - auto OrRHS = MIRBuilder.buildShl( - NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); - - Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); - Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst); - } - } else { - if (Amt.ugt(VTBits)) { - Hi = Lo = MIRBuilder.buildAShr( - NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); - } else if (Amt.ugt(NVTBits)) { - Lo = MIRBuilder.buildAShr(NVT, InH, - MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); - Hi = MIRBuilder.buildAShr(NVT, InH, - MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); - } else if (Amt == NVTBits) { - Lo = InH; - Hi = MIRBuilder.buildAShr(NVT, InH, - MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); - } else { - auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); - - auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); - auto OrRHS = MIRBuilder.buildShl( - NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); - - Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); - Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst); - } - } - - MIRBuilder.buildMerge(MI.getOperand(0), {Lo, Hi}); - MI.eraseFromParent(); - - return Legalized; -} - -// TODO: Optimize if constant shift amount. -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, - LLT RequestedTy) { - if (TypeIdx == 1) { - Observer.changingInstr(MI); - narrowScalarSrc(MI, RequestedTy, 2); - Observer.changedInstr(MI); - return Legalized; - } - - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - if (DstTy.isVector()) - return UnableToLegalize; - - Register Amt = MI.getOperand(2).getReg(); - LLT ShiftAmtTy = MRI.getType(Amt); - const unsigned DstEltSize = DstTy.getScalarSizeInBits(); - if (DstEltSize % 2 != 0) - return UnableToLegalize; - - // Ignore the input type. We can only go to exactly half the size of the - // input. If that isn't small enough, the resulting pieces will be further - // legalized. - const unsigned NewBitSize = DstEltSize / 2; - const LLT HalfTy = LLT::scalar(NewBitSize); - const LLT CondTy = LLT::scalar(1); - - if (const MachineInstr *KShiftAmt = - getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) { - return narrowScalarShiftByConstant( - MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy); - } - - // TODO: Expand with known bits. - - // Handle the fully general expansion by an unknown amount. 
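// (Editorial sketch of the expansion, not upstream text, written for a
// 2*N-bit value {InH:InL} shifted by a runtime amount Amt, where
// NewBitSize = N:
//   Amt <  N: Lo = InL << Amt,  Hi = (InH << Amt) | (InL >> (N - Amt))
//   Amt >= N: Lo = 0,           Hi = InL << (Amt - N)
// for G_SHL, and symmetrically for the right shifts; the compares and
// selects built below choose between the two precomputed forms.)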
- auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize); - - Register InL = MRI.createGenericVirtualRegister(HalfTy); - Register InH = MRI.createGenericVirtualRegister(HalfTy); - MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1)); - - auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits); - auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt); - - auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0); - auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits); - auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero); - - Register ResultRegs[2]; - switch (MI.getOpcode()) { - case TargetOpcode::G_SHL: { - // Short: ShAmt < NewBitSize - auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt); - - auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, AmtLack); - auto HiOr = MIRBuilder.buildShl(HalfTy, InH, Amt); - auto HiS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); - - // Long: ShAmt >= NewBitSize - auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero. - auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part. - - auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL); - auto Hi = MIRBuilder.buildSelect( - HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL)); - - ResultRegs[0] = Lo.getReg(0); - ResultRegs[1] = Hi.getReg(0); - break; - } - case TargetOpcode::G_LSHR: - case TargetOpcode::G_ASHR: { - // Short: ShAmt < NewBitSize - auto HiS = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, {InH, Amt}); - - auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, Amt); - auto HiOr = MIRBuilder.buildShl(HalfTy, InH, AmtLack); - auto LoS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); - - // Long: ShAmt >= NewBitSize - MachineInstrBuilder HiL; - if (MI.getOpcode() == TargetOpcode::G_LSHR) { - HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero. - } else { - auto ShiftAmt = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1); - HiL = MIRBuilder.buildAShr(HalfTy, InH, ShiftAmt); // Sign of Hi part. - } - auto LoL = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, - {InH, AmtExcess}); // Lo from Hi part. 
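// (Editorial note: the extra IsZero select below guards Amt == 0,
// where AmtLack would equal NewBitSize and the corresponding
// full-bit-width shift in the "short" path would be undefined; in that
// case the input halves are passed through unchanged.)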
- - auto Lo = MIRBuilder.buildSelect( - HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL)); - - auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL); - - ResultRegs[0] = Lo.getReg(0); - ResultRegs[1] = Hi.getReg(0); - break; - } - default: - llvm_unreachable("not a shift"); - } - - MIRBuilder.buildMerge(DstReg, ResultRegs); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, - LLT MoreTy) { - assert(TypeIdx == 0 && "Expecting only Idx 0"); - - Observer.changingInstr(MI); - for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { - MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); - MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); - moreElementsVectorSrc(MI, MoreTy, I); - } - - MachineBasicBlock &MBB = *MI.getParent(); - MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); - moreElementsVectorDst(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, - LLT MoreTy) { - unsigned Opc = MI.getOpcode(); - switch (Opc) { - case TargetOpcode::G_IMPLICIT_DEF: - case TargetOpcode::G_LOAD: { - if (TypeIdx != 0) - return UnableToLegalize; - Observer.changingInstr(MI); - moreElementsVectorDst(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_STORE: - if (TypeIdx != 0) - return UnableToLegalize; - Observer.changingInstr(MI); - moreElementsVectorSrc(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_AND: - case TargetOpcode::G_OR: - case TargetOpcode::G_XOR: - case TargetOpcode::G_SMIN: - case TargetOpcode::G_SMAX: - case TargetOpcode::G_UMIN: - case TargetOpcode::G_UMAX: - case TargetOpcode::G_FMINNUM: - case TargetOpcode::G_FMAXNUM: - case TargetOpcode::G_FMINNUM_IEEE: - case TargetOpcode::G_FMAXNUM_IEEE: - case TargetOpcode::G_FMINIMUM: - case TargetOpcode::G_FMAXIMUM: { - Observer.changingInstr(MI); - moreElementsVectorSrc(MI, MoreTy, 1); - moreElementsVectorSrc(MI, MoreTy, 2); - moreElementsVectorDst(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_EXTRACT: - if (TypeIdx != 1) - return UnableToLegalize; - Observer.changingInstr(MI); - moreElementsVectorSrc(MI, MoreTy, 1); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_INSERT: - case TargetOpcode::G_FREEZE: - if (TypeIdx != 0) - return UnableToLegalize; - Observer.changingInstr(MI); - moreElementsVectorSrc(MI, MoreTy, 1); - moreElementsVectorDst(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_SELECT: - if (TypeIdx != 0) - return UnableToLegalize; - if (MRI.getType(MI.getOperand(1).getReg()).isVector()) - return UnableToLegalize; - - Observer.changingInstr(MI); - moreElementsVectorSrc(MI, MoreTy, 2); - moreElementsVectorSrc(MI, MoreTy, 3); - moreElementsVectorDst(MI, MoreTy, 0); - Observer.changedInstr(MI); - return Legalized; - case TargetOpcode::G_UNMERGE_VALUES: { - if (TypeIdx != 1) - return UnableToLegalize; - - LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); - int NumDst = MI.getNumOperands() - 1; - moreElementsVectorSrc(MI, MoreTy, NumDst); - - auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); - for (int I = 0; I != NumDst; ++I) - MIB.addDef(MI.getOperand(I).getReg()); - - int NewNumDst = MoreTy.getSizeInBits() / DstTy.getSizeInBits(); - for (int I = NumDst; I != 
NewNumDst; ++I) - MIB.addDef(MRI.createGenericVirtualRegister(DstTy)); - - MIB.addUse(MI.getOperand(NumDst).getReg()); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_PHI: - return moreElementsVectorPhi(MI, TypeIdx, MoreTy); - default: - return UnableToLegalize; - } -} - -void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs, - ArrayRef<Register> Src1Regs, - ArrayRef<Register> Src2Regs, - LLT NarrowTy) { - MachineIRBuilder &B = MIRBuilder; - unsigned SrcParts = Src1Regs.size(); - unsigned DstParts = DstRegs.size(); - - unsigned DstIdx = 0; // Low bits of the result. - Register FactorSum = - B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0); - DstRegs[DstIdx] = FactorSum; - - unsigned CarrySumPrevDstIdx; - SmallVector<Register, 4> Factors; - - for (DstIdx = 1; DstIdx < DstParts; DstIdx++) { - // Collect low parts of muls for DstIdx. - for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1; - i <= std::min(DstIdx, SrcParts - 1); ++i) { - MachineInstrBuilder Mul = - B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]); - Factors.push_back(Mul.getReg(0)); - } - // Collect high parts of muls from previous DstIdx. - for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts; - i <= std::min(DstIdx - 1, SrcParts - 1); ++i) { - MachineInstrBuilder Umulh = - B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]); - Factors.push_back(Umulh.getReg(0)); - } - // Add CarrySum from additions calculated for previous DstIdx. - if (DstIdx != 1) { - Factors.push_back(CarrySumPrevDstIdx); - } - - Register CarrySum; - // Add all factors and accumulate all carries into CarrySum. - if (DstIdx != DstParts - 1) { - MachineInstrBuilder Uaddo = - B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]); - FactorSum = Uaddo.getReg(0); - CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0); - for (unsigned i = 2; i < Factors.size(); ++i) { - MachineInstrBuilder Uaddo = - B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]); - FactorSum = Uaddo.getReg(0); - MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1)); - CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0); - } - } else { - // Since value for the next index is not calculated, neither is CarrySum. - FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0); - for (unsigned i = 2; i < Factors.size(); ++i) - FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0); - } - - CarrySumPrevDstIdx = CarrySum; - DstRegs[DstIdx] = FactorSum; - Factors.clear(); - } -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - Register Src1 = MI.getOperand(1).getReg(); - Register Src2 = MI.getOperand(2).getReg(); - - LLT Ty = MRI.getType(DstReg); - if (Ty.isVector()) - return UnableToLegalize; - - unsigned SrcSize = MRI.getType(Src1).getSizeInBits(); - unsigned DstSize = Ty.getSizeInBits(); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0) - return UnableToLegalize; - - unsigned NumDstParts = DstSize / NarrowSize; - unsigned NumSrcParts = SrcSize / NarrowSize; - bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH; - unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 
2 : 1); - - SmallVector<Register, 2> Src1Parts, Src2Parts; - SmallVector<Register, 2> DstTmpRegs(DstTmpParts); - extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts); - extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts); - multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy); - - // Take only high half of registers if this is high mul. - ArrayRef<Register> DstRegs( - IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts); - MIRBuilder.buildMerge(DstReg, DstRegs); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult + case G_LOAD: + case G_STORE: + return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); + case G_SEXT_INREG: + return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy); + default: + return UnableToLegalize; + } +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt, + const LLT HalfTy, const LLT AmtTy) { + + Register InL = MRI.createGenericVirtualRegister(HalfTy); + Register InH = MRI.createGenericVirtualRegister(HalfTy); + MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1)); + + if (Amt.isNullValue()) { + MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH}); + MI.eraseFromParent(); + return Legalized; + } + + LLT NVT = HalfTy; + unsigned NVTBits = HalfTy.getSizeInBits(); + unsigned VTBits = 2 * NVTBits; + + SrcOp Lo(Register(0)), Hi(Register(0)); + if (MI.getOpcode() == TargetOpcode::G_SHL) { + if (Amt.ugt(VTBits)) { + Lo = Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildConstant(NVT, 0); + Hi = MIRBuilder.buildShl(NVT, InL, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + } else if (Amt == NVTBits) { + Lo = MIRBuilder.buildConstant(NVT, 0); + Hi = InL; + } else { + Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt)); + auto OrLHS = + MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt)); + auto OrRHS = MIRBuilder.buildLShr( + NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + } + } else if (MI.getOpcode() == TargetOpcode::G_LSHR) { + if (Amt.ugt(VTBits)) { + Lo = Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildLShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + Hi = MIRBuilder.buildConstant(NVT, 0); + } else if (Amt == NVTBits) { + Lo = InH; + Hi = MIRBuilder.buildConstant(NVT, 0); + } else { + auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); + + auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); + auto OrRHS = MIRBuilder.buildShl( + NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + + Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst); + } + } else { + if (Amt.ugt(VTBits)) { + Hi = Lo = MIRBuilder.buildAShr( + NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else if (Amt.ugt(NVTBits)) { + Lo = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); + Hi = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else if (Amt == NVTBits) { + Lo = InH; + Hi = MIRBuilder.buildAShr(NVT, InH, + MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); + } else { + auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); + + auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); + auto OrRHS = MIRBuilder.buildShl( + NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); + + Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); + Hi 
= MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst); + } + } + + MIRBuilder.buildMerge(MI.getOperand(0), {Lo, Hi}); + MI.eraseFromParent(); + + return Legalized; +} + +// TODO: Optimize if constant shift amount. +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, + LLT RequestedTy) { + if (TypeIdx == 1) { + Observer.changingInstr(MI); + narrowScalarSrc(MI, RequestedTy, 2); + Observer.changedInstr(MI); + return Legalized; + } + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + Register Amt = MI.getOperand(2).getReg(); + LLT ShiftAmtTy = MRI.getType(Amt); + const unsigned DstEltSize = DstTy.getScalarSizeInBits(); + if (DstEltSize % 2 != 0) + return UnableToLegalize; + + // Ignore the input type. We can only go to exactly half the size of the + // input. If that isn't small enough, the resulting pieces will be further + // legalized. + const unsigned NewBitSize = DstEltSize / 2; + const LLT HalfTy = LLT::scalar(NewBitSize); + const LLT CondTy = LLT::scalar(1); + + if (const MachineInstr *KShiftAmt = + getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) { + return narrowScalarShiftByConstant( + MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy); + } + + // TODO: Expand with known bits. + + // Handle the fully general expansion by an unknown amount. + auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize); + + Register InL = MRI.createGenericVirtualRegister(HalfTy); + Register InH = MRI.createGenericVirtualRegister(HalfTy); + MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1)); + + auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits); + auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt); + + auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0); + auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits); + auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero); + + Register ResultRegs[2]; + switch (MI.getOpcode()) { + case TargetOpcode::G_SHL: { + // Short: ShAmt < NewBitSize + auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt); + + auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, AmtLack); + auto HiOr = MIRBuilder.buildShl(HalfTy, InH, Amt); + auto HiS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); + + // Long: ShAmt >= NewBitSize + auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero. + auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part. + + auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL); + auto Hi = MIRBuilder.buildSelect( + HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL)); + + ResultRegs[0] = Lo.getReg(0); + ResultRegs[1] = Hi.getReg(0); + break; + } + case TargetOpcode::G_LSHR: + case TargetOpcode::G_ASHR: { + // Short: ShAmt < NewBitSize + auto HiS = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, {InH, Amt}); + + auto LoOr = MIRBuilder.buildLShr(HalfTy, InL, Amt); + auto HiOr = MIRBuilder.buildShl(HalfTy, InH, AmtLack); + auto LoS = MIRBuilder.buildOr(HalfTy, LoOr, HiOr); + + // Long: ShAmt >= NewBitSize + MachineInstrBuilder HiL; + if (MI.getOpcode() == TargetOpcode::G_LSHR) { + HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero. + } else { + auto ShiftAmt = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1); + HiL = MIRBuilder.buildAShr(HalfTy, InH, ShiftAmt); // Sign of Hi part. + } + auto LoL = MIRBuilder.buildInstr(MI.getOpcode(), {HalfTy}, + {InH, AmtExcess}); // Lo from Hi part. 
+ + auto Lo = MIRBuilder.buildSelect( + HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL)); + + auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL); + + ResultRegs[0] = Lo.getReg(0); + ResultRegs[1] = Hi.getReg(0); + break; + } + default: + llvm_unreachable("not a shift"); + } + + MIRBuilder.buildMerge(DstReg, ResultRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, + LLT MoreTy) { + assert(TypeIdx == 0 && "Expecting only Idx 0"); + + Observer.changingInstr(MI); + for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { + MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); + MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); + moreElementsVectorSrc(MI, MoreTy, I); + } + + MachineBasicBlock &MBB = *MI.getParent(); + MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, + LLT MoreTy) { + unsigned Opc = MI.getOpcode(); + switch (Opc) { + case TargetOpcode::G_IMPLICIT_DEF: + case TargetOpcode::G_LOAD: { + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_STORE: + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_AND: + case TargetOpcode::G_OR: + case TargetOpcode::G_XOR: + case TargetOpcode::G_SMIN: + case TargetOpcode::G_SMAX: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: + case TargetOpcode::G_FMINNUM: + case TargetOpcode::G_FMAXNUM: + case TargetOpcode::G_FMINNUM_IEEE: + case TargetOpcode::G_FMAXNUM_IEEE: + case TargetOpcode::G_FMINIMUM: + case TargetOpcode::G_FMAXIMUM: { + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + moreElementsVectorSrc(MI, MoreTy, 2); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_EXTRACT: + if (TypeIdx != 1) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_INSERT: + case TargetOpcode::G_FREEZE: + if (TypeIdx != 0) + return UnableToLegalize; + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 1); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_SELECT: + if (TypeIdx != 0) + return UnableToLegalize; + if (MRI.getType(MI.getOperand(1).getReg()).isVector()) + return UnableToLegalize; + + Observer.changingInstr(MI); + moreElementsVectorSrc(MI, MoreTy, 2); + moreElementsVectorSrc(MI, MoreTy, 3); + moreElementsVectorDst(MI, MoreTy, 0); + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_UNMERGE_VALUES: { + if (TypeIdx != 1) + return UnableToLegalize; + + LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); + int NumDst = MI.getNumOperands() - 1; + moreElementsVectorSrc(MI, MoreTy, NumDst); + + auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_UNMERGE_VALUES); + for (int I = 0; I != NumDst; ++I) + MIB.addDef(MI.getOperand(I).getReg()); + + int NewNumDst = MoreTy.getSizeInBits() / DstTy.getSizeInBits(); + for (int I = NumDst; I != 
NewNumDst; ++I) + MIB.addDef(MRI.createGenericVirtualRegister(DstTy)); + + MIB.addUse(MI.getOperand(NumDst).getReg()); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_PHI: + return moreElementsVectorPhi(MI, TypeIdx, MoreTy); + default: + return UnableToLegalize; + } +} + +void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs, + ArrayRef<Register> Src1Regs, + ArrayRef<Register> Src2Regs, + LLT NarrowTy) { + MachineIRBuilder &B = MIRBuilder; + unsigned SrcParts = Src1Regs.size(); + unsigned DstParts = DstRegs.size(); + + unsigned DstIdx = 0; // Low bits of the result. + Register FactorSum = + B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0); + DstRegs[DstIdx] = FactorSum; + + unsigned CarrySumPrevDstIdx; + SmallVector<Register, 4> Factors; + + for (DstIdx = 1; DstIdx < DstParts; DstIdx++) { + // Collect low parts of muls for DstIdx. + for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1; + i <= std::min(DstIdx, SrcParts - 1); ++i) { + MachineInstrBuilder Mul = + B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]); + Factors.push_back(Mul.getReg(0)); + } + // Collect high parts of muls from previous DstIdx. + for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts; + i <= std::min(DstIdx - 1, SrcParts - 1); ++i) { + MachineInstrBuilder Umulh = + B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]); + Factors.push_back(Umulh.getReg(0)); + } + // Add CarrySum from additions calculated for previous DstIdx. + if (DstIdx != 1) { + Factors.push_back(CarrySumPrevDstIdx); + } + + Register CarrySum; + // Add all factors and accumulate all carries into CarrySum. + if (DstIdx != DstParts - 1) { + MachineInstrBuilder Uaddo = + B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]); + FactorSum = Uaddo.getReg(0); + CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0); + for (unsigned i = 2; i < Factors.size(); ++i) { + MachineInstrBuilder Uaddo = + B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]); + FactorSum = Uaddo.getReg(0); + MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1)); + CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0); + } + } else { + // Since value for the next index is not calculated, neither is CarrySum. + FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0); + for (unsigned i = 2; i < Factors.size(); ++i) + FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0); + } + + CarrySumPrevDstIdx = CarrySum; + DstRegs[DstIdx] = FactorSum; + Factors.clear(); + } +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + Register Src1 = MI.getOperand(1).getReg(); + Register Src2 = MI.getOperand(2).getReg(); + + LLT Ty = MRI.getType(DstReg); + if (Ty.isVector()) + return UnableToLegalize; + + unsigned SrcSize = MRI.getType(Src1).getSizeInBits(); + unsigned DstSize = Ty.getSizeInBits(); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0) + return UnableToLegalize; + + unsigned NumDstParts = DstSize / NarrowSize; + unsigned NumSrcParts = SrcSize / NarrowSize; + bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH; + unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 
2 : 1); + + SmallVector<Register, 2> Src1Parts, Src2Parts; + SmallVector<Register, 2> DstTmpRegs(DstTmpParts); + extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts); + extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts); + multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy); + + // Take only high half of registers if this is high mul. + ArrayRef<Register> DstRegs( + IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts); + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { if (TypeIdx != 0) @@ -4509,881 +4509,881 @@ LegalizerHelper::narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, } LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - uint64_t NarrowSize = NarrowTy.getSizeInBits(); - - int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); - // FIXME: add support for when SizeOp1 isn't an exact multiple of - // NarrowSize. - if (SizeOp1 % NarrowSize != 0) - return UnableToLegalize; - int NumParts = SizeOp1 / NarrowSize; - - SmallVector<Register, 2> SrcRegs, DstRegs; - SmallVector<uint64_t, 2> Indexes; - extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); - - Register OpReg = MI.getOperand(0).getReg(); - uint64_t OpStart = MI.getOperand(2).getImm(); - uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); - for (int i = 0; i < NumParts; ++i) { - unsigned SrcStart = i * NarrowSize; - - if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) { - // No part of the extract uses this subregister, ignore it. - continue; - } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) { - // The entire subregister is extracted, forward the value. - DstRegs.push_back(SrcRegs[i]); - continue; - } - - // OpSegStart is where this destination segment would start in OpReg if it - // extended infinitely in both directions. - int64_t ExtractOffset; - uint64_t SegSize; - if (OpStart < SrcStart) { - ExtractOffset = 0; - SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart); - } else { - ExtractOffset = OpStart - SrcStart; - SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize); - } - - Register SegReg = SrcRegs[i]; - if (ExtractOffset != 0 || SegSize != NarrowSize) { - // A genuine extract is needed. - SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); - MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset); - } - - DstRegs.push_back(SegReg); - } - - Register DstReg = MI.getOperand(0).getReg(); - if (MRI.getType(DstReg).isVector()) - MIRBuilder.buildBuildVector(DstReg, DstRegs); - else if (DstRegs.size() > 1) - MIRBuilder.buildMerge(DstReg, DstRegs); - else - MIRBuilder.buildCopy(DstReg, DstRegs[0]); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - // FIXME: Don't know how to handle secondary types yet. - if (TypeIdx != 0) - return UnableToLegalize; - - uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - uint64_t NarrowSize = NarrowTy.getSizeInBits(); - - // FIXME: add support for when SizeOp0 isn't an exact multiple of - // NarrowSize. 
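// (e.g. a 96-bit destination narrowed with 64-bit parts would leave a
// 32-bit remainder, which the exact-multiple check below rejects.)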
- if (SizeOp0 % NarrowSize != 0) - return UnableToLegalize; - - int NumParts = SizeOp0 / NarrowSize; - - SmallVector<Register, 2> SrcRegs, DstRegs; - SmallVector<uint64_t, 2> Indexes; - extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); - - Register OpReg = MI.getOperand(2).getReg(); - uint64_t OpStart = MI.getOperand(3).getImm(); - uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); - for (int i = 0; i < NumParts; ++i) { - unsigned DstStart = i * NarrowSize; - - if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) { - // No part of the insert affects this subregister, forward the original. - DstRegs.push_back(SrcRegs[i]); - continue; - } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) { - // The entire subregister is defined by this insert, forward the new - // value. - DstRegs.push_back(OpReg); - continue; - } - - // OpSegStart is where this destination segment would start in OpReg if it - // extended infinitely in both directions. - int64_t ExtractOffset, InsertOffset; - uint64_t SegSize; - if (OpStart < DstStart) { - InsertOffset = 0; - ExtractOffset = DstStart - OpStart; - SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart); - } else { - InsertOffset = OpStart - DstStart; - ExtractOffset = 0; - SegSize = - std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart); - } - - Register SegReg = OpReg; - if (ExtractOffset != 0 || SegSize != OpSize) { - // A genuine extract is needed. - SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); - MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset); - } - - Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); - MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset); - DstRegs.push_back(DstReg); - } - - assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered"); - Register DstReg = MI.getOperand(0).getReg(); - if(MRI.getType(DstReg).isVector()) - MIRBuilder.buildBuildVector(DstReg, DstRegs); - else - MIRBuilder.buildMerge(DstReg, DstRegs); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - - assert(MI.getNumOperands() == 3 && TypeIdx == 0); - - SmallVector<Register, 4> DstRegs, DstLeftoverRegs; - SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs; - SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; - LLT LeftoverTy; - if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy, - Src0Regs, Src0LeftoverRegs)) - return UnableToLegalize; - - LLT Unused; - if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused, - Src1Regs, Src1LeftoverRegs)) - llvm_unreachable("inconsistent extractParts result"); - - for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { - auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, - {Src0Regs[I], Src1Regs[I]}); - DstRegs.push_back(Inst.getReg(0)); - } - - for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { - auto Inst = MIRBuilder.buildInstr( - MI.getOpcode(), - {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]}); - DstLeftoverRegs.push_back(Inst.getReg(0)); - } - - insertParts(DstReg, DstTy, NarrowTy, DstRegs, - LeftoverTy, DstLeftoverRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 0) - return 
UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - - LLT DstTy = MRI.getType(DstReg); - if (DstTy.isVector()) - return UnableToLegalize; - - SmallVector<Register, 8> Parts; - LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg); - LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode()); - buildWidenedRemergeToDst(DstReg, LCMTy, Parts); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 0) - return UnableToLegalize; - - Register CondReg = MI.getOperand(1).getReg(); - LLT CondTy = MRI.getType(CondReg); - if (CondTy.isVector()) // TODO: Handle vselect - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - - SmallVector<Register, 4> DstRegs, DstLeftoverRegs; - SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; - SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs; - LLT LeftoverTy; - if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy, - Src1Regs, Src1LeftoverRegs)) - return UnableToLegalize; - - LLT Unused; - if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused, - Src2Regs, Src2LeftoverRegs)) - llvm_unreachable("inconsistent extractParts result"); - - for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { - auto Select = MIRBuilder.buildSelect(NarrowTy, - CondReg, Src1Regs[I], Src2Regs[I]); - DstRegs.push_back(Select.getReg(0)); - } - - for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { - auto Select = MIRBuilder.buildSelect( - LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]); - DstLeftoverRegs.push_back(Select.getReg(0)); - } - - insertParts(DstReg, DstTy, NarrowTy, DstRegs, - LeftoverTy, DstLeftoverRegs); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(SrcReg); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - - if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { - const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF; - - MachineIRBuilder &B = MIRBuilder; - auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg); - // ctlz(Hi:Lo) -> Hi == 0 ? (NarrowSize + ctlz(Lo)) : ctlz(Hi) - auto C_0 = B.buildConstant(NarrowTy, 0); - auto HiIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), - UnmergeSrc.getReg(1), C_0); - auto LoCTLZ = IsUndef ? 
- B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)) : - B.buildCTLZ(DstTy, UnmergeSrc.getReg(0)); - auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize); - auto HiIsZeroCTLZ = B.buildAdd(DstTy, LoCTLZ, C_NarrowSize); - auto HiCTLZ = B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)); - B.buildSelect(DstReg, HiIsZero, HiIsZeroCTLZ, HiCTLZ); - - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(SrcReg); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - - if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { - const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTTZ_ZERO_UNDEF; - - MachineIRBuilder &B = MIRBuilder; - auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg); - // cttz(Hi:Lo) -> Lo == 0 ? (cttz(Hi) + NarrowSize) : cttz(Lo) - auto C_0 = B.buildConstant(NarrowTy, 0); - auto LoIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), - UnmergeSrc.getReg(0), C_0); - auto HiCTTZ = IsUndef ? - B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)) : - B.buildCTTZ(DstTy, UnmergeSrc.getReg(1)); - auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize); - auto LoIsZeroCTTZ = B.buildAdd(DstTy, HiCTTZ, C_NarrowSize); - auto LoCTTZ = B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)); - B.buildSelect(DstReg, LoIsZero, LoIsZeroCTTZ, LoCTTZ); - - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, - LLT NarrowTy) { - if (TypeIdx != 1) - return UnableToLegalize; - - Register DstReg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - - if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { - auto UnmergeSrc = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1)); - - auto LoCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(0)); - auto HiCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(1)); - MIRBuilder.buildAdd(DstReg, HiCTPOP, LoCTPOP); - - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - -LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); + // FIXME: add support for when SizeOp1 isn't an exact multiple of + // NarrowSize. + if (SizeOp1 % NarrowSize != 0) + return UnableToLegalize; + int NumParts = SizeOp1 / NarrowSize; + + SmallVector<Register, 2> SrcRegs, DstRegs; + SmallVector<uint64_t, 2> Indexes; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); + + Register OpReg = MI.getOperand(0).getReg(); + uint64_t OpStart = MI.getOperand(2).getImm(); + uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); + for (int i = 0; i < NumParts; ++i) { + unsigned SrcStart = i * NarrowSize; + + if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) { + // No part of the extract uses this subregister, ignore it. 
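// (this part's range [SrcStart, SrcStart + NarrowSize) is disjoint from
// the extracted range [OpStart, OpStart + OpSize), so it contributes no
// bits to the result.)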
+ continue; + } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) { + // The entire subregister is extracted, forward the value. + DstRegs.push_back(SrcRegs[i]); + continue; + } + + // OpSegStart is where this destination segment would start in OpReg if it + // extended infinitely in both directions. + int64_t ExtractOffset; + uint64_t SegSize; + if (OpStart < SrcStart) { + ExtractOffset = 0; + SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart); + } else { + ExtractOffset = OpStart - SrcStart; + SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize); + } + + Register SegReg = SrcRegs[i]; + if (ExtractOffset != 0 || SegSize != NarrowSize) { + // A genuine extract is needed. + SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); + MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset); + } + + DstRegs.push_back(SegReg); + } + + Register DstReg = MI.getOperand(0).getReg(); + if (MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else if (DstRegs.size() > 1) + MIRBuilder.buildMerge(DstReg, DstRegs); + else + MIRBuilder.buildCopy(DstReg, DstRegs[0]); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + // FIXME: Don't know how to handle secondary types yet. + if (TypeIdx != 0) + return UnableToLegalize; + + uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + uint64_t NarrowSize = NarrowTy.getSizeInBits(); + + // FIXME: add support for when SizeOp0 isn't an exact multiple of + // NarrowSize. + if (SizeOp0 % NarrowSize != 0) + return UnableToLegalize; + + int NumParts = SizeOp0 / NarrowSize; + + SmallVector<Register, 2> SrcRegs, DstRegs; + SmallVector<uint64_t, 2> Indexes; + extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); + + Register OpReg = MI.getOperand(2).getReg(); + uint64_t OpStart = MI.getOperand(3).getImm(); + uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); + for (int i = 0; i < NumParts; ++i) { + unsigned DstStart = i * NarrowSize; + + if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) { + // No part of the insert affects this subregister, forward the original. + DstRegs.push_back(SrcRegs[i]); + continue; + } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) { + // The entire subregister is defined by this insert, forward the new + // value. + DstRegs.push_back(OpReg); + continue; + } + + // OpSegStart is where this destination segment would start in OpReg if it + // extended infinitely in both directions. + int64_t ExtractOffset, InsertOffset; + uint64_t SegSize; + if (OpStart < DstStart) { + InsertOffset = 0; + ExtractOffset = DstStart - OpStart; + SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart); + } else { + InsertOffset = OpStart - DstStart; + ExtractOffset = 0; + SegSize = + std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart); + } + + Register SegReg = OpReg; + if (ExtractOffset != 0 || SegSize != OpSize) { + // A genuine extract is needed. 
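// (only SegSize bits of the inserted value, starting at ExtractOffset
// within OpReg, land in this NarrowTy part, so carve just those out.)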
+ SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); + MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset); + } + + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset); + DstRegs.push_back(DstReg); + } + + assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered"); + Register DstReg = MI.getOperand(0).getReg(); + if(MRI.getType(DstReg).isVector()) + MIRBuilder.buildBuildVector(DstReg, DstRegs); + else + MIRBuilder.buildMerge(DstReg, DstRegs); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + + assert(MI.getNumOperands() == 3 && TypeIdx == 0); + + SmallVector<Register, 4> DstRegs, DstLeftoverRegs; + SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs; + SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; + LLT LeftoverTy; + if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy, + Src0Regs, Src0LeftoverRegs)) + return UnableToLegalize; + + LLT Unused; + if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused, + Src1Regs, Src1LeftoverRegs)) + llvm_unreachable("inconsistent extractParts result"); + + for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { + auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, + {Src0Regs[I], Src1Regs[I]}); + DstRegs.push_back(Inst.getReg(0)); + } + + for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { + auto Inst = MIRBuilder.buildInstr( + MI.getOpcode(), + {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]}); + DstLeftoverRegs.push_back(Inst.getReg(0)); + } + + insertParts(DstReg, DstTy, NarrowTy, DstRegs, + LeftoverTy, DstLeftoverRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + + LLT DstTy = MRI.getType(DstReg); + if (DstTy.isVector()) + return UnableToLegalize; + + SmallVector<Register, 8> Parts; + LLT GCDTy = extractGCDType(Parts, DstTy, NarrowTy, SrcReg); + LLT LCMTy = buildLCMMergePieces(DstTy, NarrowTy, GCDTy, Parts, MI.getOpcode()); + buildWidenedRemergeToDst(DstReg, LCMTy, Parts); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 0) + return UnableToLegalize; + + Register CondReg = MI.getOperand(1).getReg(); + LLT CondTy = MRI.getType(CondReg); + if (CondTy.isVector()) // TODO: Handle vselect + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + + SmallVector<Register, 4> DstRegs, DstLeftoverRegs; + SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; + SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs; + LLT LeftoverTy; + if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy, + Src1Regs, Src1LeftoverRegs)) + return UnableToLegalize; + + LLT Unused; + if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused, + Src2Regs, Src2LeftoverRegs)) + llvm_unreachable("inconsistent extractParts result"); + + for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { + auto Select = 
MIRBuilder.buildSelect(NarrowTy, + CondReg, Src1Regs[I], Src2Regs[I]); + DstRegs.push_back(Select.getReg(0)); + } + + for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { + auto Select = MIRBuilder.buildSelect( + LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]); + DstLeftoverRegs.push_back(Select.getReg(0)); + } + + insertParts(DstReg, DstTy, NarrowTy, DstRegs, + LeftoverTy, DstLeftoverRegs); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + + if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { + const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF; + + MachineIRBuilder &B = MIRBuilder; + auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg); + // ctlz(Hi:Lo) -> Hi == 0 ? (NarrowSize + ctlz(Lo)) : ctlz(Hi) + auto C_0 = B.buildConstant(NarrowTy, 0); + auto HiIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), + UnmergeSrc.getReg(1), C_0); + auto LoCTLZ = IsUndef ? + B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)) : + B.buildCTLZ(DstTy, UnmergeSrc.getReg(0)); + auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize); + auto HiIsZeroCTLZ = B.buildAdd(DstTy, LoCTLZ, C_NarrowSize); + auto HiCTLZ = B.buildCTLZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)); + B.buildSelect(DstReg, HiIsZero, HiIsZeroCTLZ, HiCTLZ); + + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + + if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { + const bool IsUndef = MI.getOpcode() == TargetOpcode::G_CTTZ_ZERO_UNDEF; + + MachineIRBuilder &B = MIRBuilder; + auto UnmergeSrc = B.buildUnmerge(NarrowTy, SrcReg); + // cttz(Hi:Lo) -> Lo == 0 ? (cttz(Hi) + NarrowSize) : cttz(Lo) + auto C_0 = B.buildConstant(NarrowTy, 0); + auto LoIsZero = B.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), + UnmergeSrc.getReg(0), C_0); + auto HiCTTZ = IsUndef ? 
+ B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(1)) : + B.buildCTTZ(DstTy, UnmergeSrc.getReg(1)); + auto C_NarrowSize = B.buildConstant(DstTy, NarrowSize); + auto LoIsZeroCTTZ = B.buildAdd(DstTy, HiCTTZ, C_NarrowSize); + auto LoCTTZ = B.buildCTTZ_ZERO_UNDEF(DstTy, UnmergeSrc.getReg(0)); + B.buildSelect(DstReg, LoIsZero, LoIsZeroCTTZ, LoCTTZ); + + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + if (TypeIdx != 1) + return UnableToLegalize; + + Register DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(MI.getOperand(1).getReg()); + unsigned NarrowSize = NarrowTy.getSizeInBits(); + + if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) { + auto UnmergeSrc = MIRBuilder.buildUnmerge(NarrowTy, MI.getOperand(1)); + + auto LoCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(0)); + auto HiCTPOP = MIRBuilder.buildCTPOP(DstTy, UnmergeSrc.getReg(1)); + MIRBuilder.buildAdd(DstReg, HiCTPOP, LoCTPOP); + + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerBitCount(MachineInstr &MI) { - unsigned Opc = MI.getOpcode(); + unsigned Opc = MI.getOpcode(); const auto &TII = MIRBuilder.getTII(); - auto isSupported = [this](const LegalityQuery &Q) { - auto QAction = LI.getAction(Q).Action; - return QAction == Legal || QAction == Libcall || QAction == Custom; - }; - switch (Opc) { - default: - return UnableToLegalize; - case TargetOpcode::G_CTLZ_ZERO_UNDEF: { - // This trivially expands to CTLZ. - Observer.changingInstr(MI); - MI.setDesc(TII.get(TargetOpcode::G_CTLZ)); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_CTLZ: { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(SrcReg); - unsigned Len = SrcTy.getSizeInBits(); - - if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {DstTy, SrcTy}})) { - // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero. - auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_UNDEF(DstTy, SrcReg); - auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0); - auto ICmp = MIRBuilder.buildICmp( - CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc); - auto LenConst = MIRBuilder.buildConstant(DstTy, Len); - MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU); - MI.eraseFromParent(); - return Legalized; - } - // for now, we do this: - // NewLen = NextPowerOf2(Len); - // x = x | (x >> 1); - // x = x | (x >> 2); - // ... - // x = x | (x >>16); - // x = x | (x >>32); // for 64-bit input - // Upto NewLen/2 - // return Len - popcount(x); - // - // Ref: "Hacker's Delight" by Henry Warren - Register Op = SrcReg; - unsigned NewLen = PowerOf2Ceil(Len); - for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) { - auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i); - auto MIBOp = MIRBuilder.buildOr( - SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt)); - Op = MIBOp.getReg(0); - } - auto MIBPop = MIRBuilder.buildCTPOP(DstTy, Op); - MIRBuilder.buildSub(MI.getOperand(0), MIRBuilder.buildConstant(DstTy, Len), - MIBPop); - MI.eraseFromParent(); - return Legalized; - } - case TargetOpcode::G_CTTZ_ZERO_UNDEF: { - // This trivially expands to CTTZ. 
- Observer.changingInstr(MI); - MI.setDesc(TII.get(TargetOpcode::G_CTTZ)); - Observer.changedInstr(MI); - return Legalized; - } - case TargetOpcode::G_CTTZ: { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(SrcReg); - - unsigned Len = SrcTy.getSizeInBits(); - if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {DstTy, SrcTy}})) { - // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with - // zero. - auto CttzZU = MIRBuilder.buildCTTZ_ZERO_UNDEF(DstTy, SrcReg); - auto Zero = MIRBuilder.buildConstant(SrcTy, 0); - auto ICmp = MIRBuilder.buildICmp( - CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero); - auto LenConst = MIRBuilder.buildConstant(DstTy, Len); - MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU); - MI.eraseFromParent(); - return Legalized; - } - // for now, we use: { return popcount(~x & (x - 1)); } - // unless the target has ctlz but not ctpop, in which case we use: - // { return 32 - nlz(~x & (x-1)); } - // Ref: "Hacker's Delight" by Henry Warren + auto isSupported = [this](const LegalityQuery &Q) { + auto QAction = LI.getAction(Q).Action; + return QAction == Legal || QAction == Libcall || QAction == Custom; + }; + switch (Opc) { + default: + return UnableToLegalize; + case TargetOpcode::G_CTLZ_ZERO_UNDEF: { + // This trivially expands to CTLZ. + Observer.changingInstr(MI); + MI.setDesc(TII.get(TargetOpcode::G_CTLZ)); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_CTLZ: { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + unsigned Len = SrcTy.getSizeInBits(); + + if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {DstTy, SrcTy}})) { + // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero. + auto CtlzZU = MIRBuilder.buildCTLZ_ZERO_UNDEF(DstTy, SrcReg); + auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0); + auto ICmp = MIRBuilder.buildICmp( + CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc); + auto LenConst = MIRBuilder.buildConstant(DstTy, Len); + MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CtlzZU); + MI.eraseFromParent(); + return Legalized; + } + // for now, we do this: + // NewLen = NextPowerOf2(Len); + // x = x | (x >> 1); + // x = x | (x >> 2); + // ... + // x = x | (x >>16); + // x = x | (x >>32); // for 64-bit input + // Upto NewLen/2 + // return Len - popcount(x); + // + // Ref: "Hacker's Delight" by Henry Warren + Register Op = SrcReg; + unsigned NewLen = PowerOf2Ceil(Len); + for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) { + auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i); + auto MIBOp = MIRBuilder.buildOr( + SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt)); + Op = MIBOp.getReg(0); + } + auto MIBPop = MIRBuilder.buildCTPOP(DstTy, Op); + MIRBuilder.buildSub(MI.getOperand(0), MIRBuilder.buildConstant(DstTy, Len), + MIBPop); + MI.eraseFromParent(); + return Legalized; + } + case TargetOpcode::G_CTTZ_ZERO_UNDEF: { + // This trivially expands to CTTZ. 
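// (G_CTTZ produces a well-defined result for a zero input, so it is
// always a valid refinement of the _ZERO_UNDEF form, whose result for
// zero is unspecified.)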
+ Observer.changingInstr(MI); + MI.setDesc(TII.get(TargetOpcode::G_CTTZ)); + Observer.changedInstr(MI); + return Legalized; + } + case TargetOpcode::G_CTTZ: { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(SrcReg); + + unsigned Len = SrcTy.getSizeInBits(); + if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {DstTy, SrcTy}})) { + // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with + // zero. + auto CttzZU = MIRBuilder.buildCTTZ_ZERO_UNDEF(DstTy, SrcReg); + auto Zero = MIRBuilder.buildConstant(SrcTy, 0); + auto ICmp = MIRBuilder.buildICmp( + CmpInst::ICMP_EQ, DstTy.changeElementSize(1), SrcReg, Zero); + auto LenConst = MIRBuilder.buildConstant(DstTy, Len); + MIRBuilder.buildSelect(DstReg, ICmp, LenConst, CttzZU); + MI.eraseFromParent(); + return Legalized; + } + // for now, we use: { return popcount(~x & (x - 1)); } + // unless the target has ctlz but not ctpop, in which case we use: + // { return 32 - nlz(~x & (x-1)); } + // Ref: "Hacker's Delight" by Henry Warren auto MIBCstNeg1 = MIRBuilder.buildConstant(SrcTy, -1); auto MIBNot = MIRBuilder.buildXor(SrcTy, SrcReg, MIBCstNeg1); - auto MIBTmp = MIRBuilder.buildAnd( + auto MIBTmp = MIRBuilder.buildAnd( SrcTy, MIBNot, MIRBuilder.buildAdd(SrcTy, SrcReg, MIBCstNeg1)); if (!isSupported({TargetOpcode::G_CTPOP, {SrcTy, SrcTy}}) && isSupported({TargetOpcode::G_CTLZ, {SrcTy, SrcTy}})) { auto MIBCstLen = MIRBuilder.buildConstant(SrcTy, Len); - MIRBuilder.buildSub(MI.getOperand(0), MIBCstLen, + MIRBuilder.buildSub(MI.getOperand(0), MIBCstLen, MIRBuilder.buildCTLZ(SrcTy, MIBTmp)); - MI.eraseFromParent(); - return Legalized; - } - MI.setDesc(TII.get(TargetOpcode::G_CTPOP)); - MI.getOperand(1).setReg(MIBTmp.getReg(0)); - return Legalized; - } - case TargetOpcode::G_CTPOP: { + MI.eraseFromParent(); + return Legalized; + } + MI.setDesc(TII.get(TargetOpcode::G_CTPOP)); + MI.getOperand(1).setReg(MIBTmp.getReg(0)); + return Legalized; + } + case TargetOpcode::G_CTPOP: { Register SrcReg = MI.getOperand(1).getReg(); LLT Ty = MRI.getType(SrcReg); - unsigned Size = Ty.getSizeInBits(); - MachineIRBuilder &B = MIRBuilder; - - // Count set bits in blocks of 2 bits. Default approach would be - // B2Count = { val & 0x55555555 } + { (val >> 1) & 0x55555555 } - // We use following formula instead: - // B2Count = val - { (val >> 1) & 0x55555555 } - // since it gives same result in blocks of 2 with one instruction less. - auto C_1 = B.buildConstant(Ty, 1); + unsigned Size = Ty.getSizeInBits(); + MachineIRBuilder &B = MIRBuilder; + + // Count set bits in blocks of 2 bits. Default approach would be + // B2Count = { val & 0x55555555 } + { (val >> 1) & 0x55555555 } + // We use following formula instead: + // B2Count = val - { (val >> 1) & 0x55555555 } + // since it gives same result in blocks of 2 with one instruction less. + auto C_1 = B.buildConstant(Ty, 1); auto B2Set1LoTo1Hi = B.buildLShr(Ty, SrcReg, C_1); - APInt B2Mask1HiTo0 = APInt::getSplat(Size, APInt(8, 0x55)); - auto C_B2Mask1HiTo0 = B.buildConstant(Ty, B2Mask1HiTo0); - auto B2Count1Hi = B.buildAnd(Ty, B2Set1LoTo1Hi, C_B2Mask1HiTo0); + APInt B2Mask1HiTo0 = APInt::getSplat(Size, APInt(8, 0x55)); + auto C_B2Mask1HiTo0 = B.buildConstant(Ty, B2Mask1HiTo0); + auto B2Count1Hi = B.buildAnd(Ty, B2Set1LoTo1Hi, C_B2Mask1HiTo0); auto B2Count = B.buildSub(Ty, SrcReg, B2Count1Hi); - - // In order to get count in blocks of 4 add values from adjacent block of 2. 
- // B4Count = { B2Count & 0x33333333 } + { (B2Count >> 2) & 0x33333333 } - auto C_2 = B.buildConstant(Ty, 2); - auto B4Set2LoTo2Hi = B.buildLShr(Ty, B2Count, C_2); - APInt B4Mask2HiTo0 = APInt::getSplat(Size, APInt(8, 0x33)); - auto C_B4Mask2HiTo0 = B.buildConstant(Ty, B4Mask2HiTo0); - auto B4HiB2Count = B.buildAnd(Ty, B4Set2LoTo2Hi, C_B4Mask2HiTo0); - auto B4LoB2Count = B.buildAnd(Ty, B2Count, C_B4Mask2HiTo0); - auto B4Count = B.buildAdd(Ty, B4HiB2Count, B4LoB2Count); - - // For count in blocks of 8 bits we don't have to mask high 4 bits before - // addition since count value sits in range {0,...,8} and 4 bits are enough - // to hold such binary values. After addition high 4 bits still hold count - // of set bits in high 4 bit block, set them to zero and get 8 bit result. - // B8Count = { B4Count + (B4Count >> 4) } & 0x0F0F0F0F - auto C_4 = B.buildConstant(Ty, 4); - auto B8HiB4Count = B.buildLShr(Ty, B4Count, C_4); - auto B8CountDirty4Hi = B.buildAdd(Ty, B8HiB4Count, B4Count); - APInt B8Mask4HiTo0 = APInt::getSplat(Size, APInt(8, 0x0F)); - auto C_B8Mask4HiTo0 = B.buildConstant(Ty, B8Mask4HiTo0); - auto B8Count = B.buildAnd(Ty, B8CountDirty4Hi, C_B8Mask4HiTo0); - - assert(Size<=128 && "Scalar size is too large for CTPOP lower algorithm"); - // 8 bits can hold CTPOP result of 128 bit int or smaller. Mul with this - // bitmask will set 8 msb in ResTmp to sum of all B8Counts in 8 bit blocks. - auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01))); - auto ResTmp = B.buildMul(Ty, B8Count, MulMask); - - // Shift count result from 8 high bits to low bits. - auto C_SizeM8 = B.buildConstant(Ty, Size - 8); - B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8); - - MI.eraseFromParent(); - return Legalized; - } - } -} - -// Expand s32 = G_UITOFP s64 using bit operations to an IEEE float -// representation. -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - const LLT S64 = LLT::scalar(64); - const LLT S32 = LLT::scalar(32); - const LLT S1 = LLT::scalar(1); - - assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32); - - // unsigned cul2f(ulong u) { - // uint lz = clz(u); - // uint e = (u != 0) ? 127U + 63U - lz : 0; - // u = (u << lz) & 0x7fffffffffffffffUL; - // ulong t = u & 0xffffffffffUL; - // uint v = (e << 23) | (uint)(u >> 40); - // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? 
v & 1U : 0U); - // return as_float(v + r); - // } - - auto Zero32 = MIRBuilder.buildConstant(S32, 0); - auto Zero64 = MIRBuilder.buildConstant(S64, 0); - - auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src); - - auto K = MIRBuilder.buildConstant(S32, 127U + 63U); - auto Sub = MIRBuilder.buildSub(S32, K, LZ); - - auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64); - auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32); - - auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1); - auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ); - - auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0); - - auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL); - auto T = MIRBuilder.buildAnd(S64, U, Mask1); - - auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40)); - auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23)); - auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl)); - - auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL); - auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C); - auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C); - auto One = MIRBuilder.buildConstant(S32, 1); - - auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One); - auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32); - auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0); - MIRBuilder.buildAdd(Dst, V, R); - - MI.eraseFromParent(); - return Legalized; -} - + + // In order to get count in blocks of 4 add values from adjacent block of 2. + // B4Count = { B2Count & 0x33333333 } + { (B2Count >> 2) & 0x33333333 } + auto C_2 = B.buildConstant(Ty, 2); + auto B4Set2LoTo2Hi = B.buildLShr(Ty, B2Count, C_2); + APInt B4Mask2HiTo0 = APInt::getSplat(Size, APInt(8, 0x33)); + auto C_B4Mask2HiTo0 = B.buildConstant(Ty, B4Mask2HiTo0); + auto B4HiB2Count = B.buildAnd(Ty, B4Set2LoTo2Hi, C_B4Mask2HiTo0); + auto B4LoB2Count = B.buildAnd(Ty, B2Count, C_B4Mask2HiTo0); + auto B4Count = B.buildAdd(Ty, B4HiB2Count, B4LoB2Count); + + // For count in blocks of 8 bits we don't have to mask high 4 bits before + // addition since count value sits in range {0,...,8} and 4 bits are enough + // to hold such binary values. After addition high 4 bits still hold count + // of set bits in high 4 bit block, set them to zero and get 8 bit result. + // B8Count = { B4Count + (B4Count >> 4) } & 0x0F0F0F0F + auto C_4 = B.buildConstant(Ty, 4); + auto B8HiB4Count = B.buildLShr(Ty, B4Count, C_4); + auto B8CountDirty4Hi = B.buildAdd(Ty, B8HiB4Count, B4Count); + APInt B8Mask4HiTo0 = APInt::getSplat(Size, APInt(8, 0x0F)); + auto C_B8Mask4HiTo0 = B.buildConstant(Ty, B8Mask4HiTo0); + auto B8Count = B.buildAnd(Ty, B8CountDirty4Hi, C_B8Mask4HiTo0); + + assert(Size<=128 && "Scalar size is too large for CTPOP lower algorithm"); + // 8 bits can hold CTPOP result of 128 bit int or smaller. Mul with this + // bitmask will set 8 msb in ResTmp to sum of all B8Counts in 8 bit blocks. + auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01))); + auto ResTmp = B.buildMul(Ty, B8Count, MulMask); + + // Shift count result from 8 high bits to low bits. + auto C_SizeM8 = B.buildConstant(Ty, Size - 8); + B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8); + + MI.eraseFromParent(); + return Legalized; + } + } +} + +// Expand s32 = G_UITOFP s64 using bit operations to an IEEE float +// representation. 
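// For the input 1, the routine below computes lz = 63 and
// e = 127 + 63 - 63 = 127; the normalizing shift leaves a zero mantissa
// once the implicit leading 1 is masked off, so v = 127 << 23 = 0x3f800000
// (1.0f) and no rounding increment applies.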
+LegalizerHelper::LegalizeResult +LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + const LLT S64 = LLT::scalar(64); + const LLT S32 = LLT::scalar(32); + const LLT S1 = LLT::scalar(1); + + assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32); + + // unsigned cul2f(ulong u) { + // uint lz = clz(u); + // uint e = (u != 0) ? 127U + 63U - lz : 0; + // u = (u << lz) & 0x7fffffffffffffffUL; + // ulong t = u & 0xffffffffffUL; + // uint v = (e << 23) | (uint)(u >> 40); + // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U); + // return as_float(v + r); + // } + + auto Zero32 = MIRBuilder.buildConstant(S32, 0); + auto Zero64 = MIRBuilder.buildConstant(S64, 0); + + auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src); + + auto K = MIRBuilder.buildConstant(S32, 127U + 63U); + auto Sub = MIRBuilder.buildSub(S32, K, LZ); + + auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64); + auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32); + + auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1); + auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ); + + auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0); + + auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL); + auto T = MIRBuilder.buildAnd(S64, U, Mask1); + + auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40)); + auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23)); + auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl)); + + auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL); + auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C); + auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C); + auto One = MIRBuilder.buildConstant(S32, 1); + + auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One); + auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32); + auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0); + MIRBuilder.buildAdd(Dst, V, R); + + MI.eraseFromParent(); + return Legalized; +} + LegalizerHelper::LegalizeResult LegalizerHelper::lowerUITOFP(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - - if (SrcTy == LLT::scalar(1)) { - auto True = MIRBuilder.buildFConstant(DstTy, 1.0); - auto False = MIRBuilder.buildFConstant(DstTy, 0.0); - MIRBuilder.buildSelect(Dst, Src, True, False); - MI.eraseFromParent(); - return Legalized; - } - - if (SrcTy != LLT::scalar(64)) - return UnableToLegalize; - - if (DstTy == LLT::scalar(32)) { - // TODO: SelectionDAG has several alternative expansions to port which may - // be more reasonble depending on the available instructions. If a target - // has sitofp, does not have CTLZ, or can efficiently use f64 as an - // intermediate type, this is probably worse. 
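// (unlike those alternatives, the bit-ops expansion needs only integer
// ctlz, shifts, compares and selects, so it remains usable on targets
// without any f64 arithmetic.)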
- return lowerU64ToF32BitOps(MI); - } - - return UnableToLegalize; -} - + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + + if (SrcTy == LLT::scalar(1)) { + auto True = MIRBuilder.buildFConstant(DstTy, 1.0); + auto False = MIRBuilder.buildFConstant(DstTy, 0.0); + MIRBuilder.buildSelect(Dst, Src, True, False); + MI.eraseFromParent(); + return Legalized; + } + + if (SrcTy != LLT::scalar(64)) + return UnableToLegalize; + + if (DstTy == LLT::scalar(32)) { + // TODO: SelectionDAG has several alternative expansions to port which may + // be more reasonble depending on the available instructions. If a target + // has sitofp, does not have CTLZ, or can efficiently use f64 as an + // intermediate type, this is probably worse. + return lowerU64ToF32BitOps(MI); + } + + return UnableToLegalize; +} + LegalizerHelper::LegalizeResult LegalizerHelper::lowerSITOFP(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - - const LLT S64 = LLT::scalar(64); - const LLT S32 = LLT::scalar(32); - const LLT S1 = LLT::scalar(1); - - if (SrcTy == S1) { - auto True = MIRBuilder.buildFConstant(DstTy, -1.0); - auto False = MIRBuilder.buildFConstant(DstTy, 0.0); - MIRBuilder.buildSelect(Dst, Src, True, False); - MI.eraseFromParent(); - return Legalized; - } - - if (SrcTy != S64) - return UnableToLegalize; - - if (DstTy == S32) { - // signed cl2f(long l) { - // long s = l >> 63; - // float r = cul2f((l + s) ^ s); - // return s ? -r : r; - // } - Register L = Src; - auto SignBit = MIRBuilder.buildConstant(S64, 63); - auto S = MIRBuilder.buildAShr(S64, L, SignBit); - - auto LPlusS = MIRBuilder.buildAdd(S64, L, S); - auto Xor = MIRBuilder.buildXor(S64, LPlusS, S); - auto R = MIRBuilder.buildUITOFP(S32, Xor); - - auto RNeg = MIRBuilder.buildFNeg(S32, R); - auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S, - MIRBuilder.buildConstant(S64, 0)); - MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R); - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + + const LLT S64 = LLT::scalar(64); + const LLT S32 = LLT::scalar(32); + const LLT S1 = LLT::scalar(1); + + if (SrcTy == S1) { + auto True = MIRBuilder.buildFConstant(DstTy, -1.0); + auto False = MIRBuilder.buildFConstant(DstTy, 0.0); + MIRBuilder.buildSelect(Dst, Src, True, False); + MI.eraseFromParent(); + return Legalized; + } + + if (SrcTy != S64) + return UnableToLegalize; + + if (DstTy == S32) { + // signed cl2f(long l) { + // long s = l >> 63; + // float r = cul2f((l + s) ^ s); + // return s ? 
-r : r; + // } + Register L = Src; + auto SignBit = MIRBuilder.buildConstant(S64, 63); + auto S = MIRBuilder.buildAShr(S64, L, SignBit); + + auto LPlusS = MIRBuilder.buildAdd(S64, L, S); + auto Xor = MIRBuilder.buildXor(S64, LPlusS, S); + auto R = MIRBuilder.buildUITOFP(S32, Xor); + + auto RNeg = MIRBuilder.buildFNeg(S32, R); + auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S, + MIRBuilder.buildConstant(S64, 0)); + MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R); + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOUI(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - const LLT S64 = LLT::scalar(64); - const LLT S32 = LLT::scalar(32); - - if (SrcTy != S64 && SrcTy != S32) - return UnableToLegalize; - if (DstTy != S32 && DstTy != S64) - return UnableToLegalize; - - // FPTOSI gives same result as FPTOUI for positive signed integers. - // FPTOUI needs to deal with fp values that convert to unsigned integers - // greater or equal to 2^31 for float or 2^63 for double. For brevity 2^Exp. - - APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits()); - APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle() - : APFloat::IEEEdouble(), - APInt::getNullValue(SrcTy.getSizeInBits())); - TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven); - - MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src); - - MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP); - // For fp Value greater or equal to Threshold(2^Exp), we use FPTOSI on - // (Value - 2^Exp) and add 2^Exp by setting highest bit in result to 1. - MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold); - MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub); - MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt); - MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit); - - const LLT S1 = LLT::scalar(1); - - MachineInstrBuilder FCMP = - MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, S1, Src, Threshold); - MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - const LLT S64 = LLT::scalar(64); - const LLT S32 = LLT::scalar(32); - - // FIXME: Only f32 to i64 conversions are supported. 
- if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64) - return UnableToLegalize; - - // Expand f32 -> i64 conversion - // This algorithm comes from compiler-rt's implementation of fixsfdi: - // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c - - unsigned SrcEltBits = SrcTy.getScalarSizeInBits(); - - auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000); - auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23); - - auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask); - auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit); - - auto SignMask = MIRBuilder.buildConstant(SrcTy, - APInt::getSignMask(SrcEltBits)); - auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask); - auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1); - auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit); - Sign = MIRBuilder.buildSExt(DstTy, Sign); - - auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF); - auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask); - auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000); - - auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K); - R = MIRBuilder.buildZExt(DstTy, R); - - auto Bias = MIRBuilder.buildConstant(SrcTy, 127); - auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias); - auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit); - auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent); - - auto Shl = MIRBuilder.buildShl(DstTy, R, SubExponent); - auto Srl = MIRBuilder.buildLShr(DstTy, R, ExponentSub); - - const LLT S1 = LLT::scalar(1); - auto CmpGt = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, - S1, Exponent, ExponentLoBit); - - R = MIRBuilder.buildSelect(DstTy, CmpGt, Shl, Srl); - - auto XorSign = MIRBuilder.buildXor(DstTy, R, Sign); - auto Ret = MIRBuilder.buildSub(DstTy, XorSign, Sign); - - auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0); - - auto ExponentLt0 = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, - S1, Exponent, ZeroSrcTy); - - auto ZeroDstTy = MIRBuilder.buildConstant(DstTy, 0); - MIRBuilder.buildSelect(Dst, ExponentLt0, ZeroDstTy, Ret); - - MI.eraseFromParent(); - return Legalized; -} - -// f64 -> f16 conversion using round-to-nearest-even rounding mode. -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - - if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly. - return UnableToLegalize; - - const unsigned ExpMask = 0x7ff; - const unsigned ExpBiasf64 = 1023; - const unsigned ExpBiasf16 = 15; - const LLT S32 = LLT::scalar(32); - const LLT S1 = LLT::scalar(1); - - auto Unmerge = MIRBuilder.buildUnmerge(S32, Src); - Register U = Unmerge.getReg(0); - Register UH = Unmerge.getReg(1); - - auto E = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 20)); - E = MIRBuilder.buildAnd(S32, E, MIRBuilder.buildConstant(S32, ExpMask)); - - // Subtract the fp64 exponent bias (1023) to get the real exponent and - // add the f16 bias (15) to get the biased exponent for the f16 format. 
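// (for 1.0 the stored fp64 exponent is 1023, giving 1023 - 1023 + 15 = 15,
// exactly the stored f16 exponent of 1.0.)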
- E = MIRBuilder.buildAdd( - S32, E, MIRBuilder.buildConstant(S32, -ExpBiasf64 + ExpBiasf16)); - - auto M = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 8)); - M = MIRBuilder.buildAnd(S32, M, MIRBuilder.buildConstant(S32, 0xffe)); - - auto MaskedSig = MIRBuilder.buildAnd(S32, UH, - MIRBuilder.buildConstant(S32, 0x1ff)); - MaskedSig = MIRBuilder.buildOr(S32, MaskedSig, U); - - auto Zero = MIRBuilder.buildConstant(S32, 0); - auto SigCmpNE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, MaskedSig, Zero); - auto Lo40Set = MIRBuilder.buildZExt(S32, SigCmpNE0); - M = MIRBuilder.buildOr(S32, M, Lo40Set); - - // (M != 0 ? 0x0200 : 0) | 0x7c00; - auto Bits0x200 = MIRBuilder.buildConstant(S32, 0x0200); - auto CmpM_NE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, M, Zero); - auto SelectCC = MIRBuilder.buildSelect(S32, CmpM_NE0, Bits0x200, Zero); - - auto Bits0x7c00 = MIRBuilder.buildConstant(S32, 0x7c00); - auto I = MIRBuilder.buildOr(S32, SelectCC, Bits0x7c00); - - // N = M | (E << 12); - auto EShl12 = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 12)); - auto N = MIRBuilder.buildOr(S32, M, EShl12); - - // B = clamp(1-E, 0, 13); - auto One = MIRBuilder.buildConstant(S32, 1); - auto OneSubExp = MIRBuilder.buildSub(S32, One, E); - auto B = MIRBuilder.buildSMax(S32, OneSubExp, Zero); - B = MIRBuilder.buildSMin(S32, B, MIRBuilder.buildConstant(S32, 13)); - - auto SigSetHigh = MIRBuilder.buildOr(S32, M, - MIRBuilder.buildConstant(S32, 0x1000)); - - auto D = MIRBuilder.buildLShr(S32, SigSetHigh, B); - auto D0 = MIRBuilder.buildShl(S32, D, B); - - auto D0_NE_SigSetHigh = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, - D0, SigSetHigh); - auto D1 = MIRBuilder.buildZExt(S32, D0_NE_SigSetHigh); - D = MIRBuilder.buildOr(S32, D, D1); - - auto CmpELtOne = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, E, One); - auto V = MIRBuilder.buildSelect(S32, CmpELtOne, D, N); - - auto VLow3 = MIRBuilder.buildAnd(S32, V, MIRBuilder.buildConstant(S32, 7)); - V = MIRBuilder.buildLShr(S32, V, MIRBuilder.buildConstant(S32, 2)); - - auto VLow3Eq3 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, VLow3, - MIRBuilder.buildConstant(S32, 3)); - auto V0 = MIRBuilder.buildZExt(S32, VLow3Eq3); - - auto VLow3Gt5 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, VLow3, - MIRBuilder.buildConstant(S32, 5)); - auto V1 = MIRBuilder.buildZExt(S32, VLow3Gt5); - - V1 = MIRBuilder.buildOr(S32, V0, V1); - V = MIRBuilder.buildAdd(S32, V, V1); - - auto CmpEGt30 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, - E, MIRBuilder.buildConstant(S32, 30)); - V = MIRBuilder.buildSelect(S32, CmpEGt30, - MIRBuilder.buildConstant(S32, 0x7c00), V); - - auto CmpEGt1039 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, - E, MIRBuilder.buildConstant(S32, 1039)); - V = MIRBuilder.buildSelect(S32, CmpEGt1039, I, V); - - // Extract the sign bit. 
- auto Sign = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 16)); - Sign = MIRBuilder.buildAnd(S32, Sign, MIRBuilder.buildConstant(S32, 0x8000)); - - // Insert the sign bit - V = MIRBuilder.buildOr(S32, Sign, V); - - MIRBuilder.buildTrunc(Dst, V); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + const LLT S64 = LLT::scalar(64); + const LLT S32 = LLT::scalar(32); + + if (SrcTy != S64 && SrcTy != S32) + return UnableToLegalize; + if (DstTy != S32 && DstTy != S64) + return UnableToLegalize; + + // FPTOSI gives same result as FPTOUI for positive signed integers. + // FPTOUI needs to deal with fp values that convert to unsigned integers + // greater or equal to 2^31 for float or 2^63 for double. For brevity 2^Exp. + + APInt TwoPExpInt = APInt::getSignMask(DstTy.getSizeInBits()); + APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle() + : APFloat::IEEEdouble(), + APInt::getNullValue(SrcTy.getSizeInBits())); + TwoPExpFP.convertFromAPInt(TwoPExpInt, false, APFloat::rmNearestTiesToEven); + + MachineInstrBuilder FPTOSI = MIRBuilder.buildFPTOSI(DstTy, Src); + + MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP); + // For fp Value greater or equal to Threshold(2^Exp), we use FPTOSI on + // (Value - 2^Exp) and add 2^Exp by setting highest bit in result to 1. + MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold); + MachineInstrBuilder ResLowBits = MIRBuilder.buildFPTOSI(DstTy, FSub); + MachineInstrBuilder ResHighBit = MIRBuilder.buildConstant(DstTy, TwoPExpInt); + MachineInstrBuilder Res = MIRBuilder.buildXor(DstTy, ResLowBits, ResHighBit); + + const LLT S1 = LLT::scalar(1); + + MachineInstrBuilder FCMP = + MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, S1, Src, Threshold); + MIRBuilder.buildSelect(Dst, FCMP, FPTOSI, Res); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTOSI(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + const LLT S64 = LLT::scalar(64); + const LLT S32 = LLT::scalar(32); + + // FIXME: Only f32 to i64 conversions are supported. 
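  // A worked example of the expansion below (values illustrative):
  // Src = -10.0f has bits 0xC1200000 (sign 1, exponent field 130,
  // mantissa 0x200000), so
  //   Exponent = 130 - 127 = 3,  Sign = -1 (all ones after the sext)
  //   R        = zext(0x200000 | 0x800000) = 10485760
  //   Exponent <= 23, so R becomes 10485760 >> (23 - 3) = 10
  //   Ret      = (10 ^ -1) - -1 = -10
  // and Exponent >= 0, so the final select returns -10 as expected.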
+ if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64) + return UnableToLegalize; + + // Expand f32 -> i64 conversion + // This algorithm comes from compiler-rt's implementation of fixsfdi: + // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c + + unsigned SrcEltBits = SrcTy.getScalarSizeInBits(); + + auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000); + auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23); + + auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask); + auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit); + + auto SignMask = MIRBuilder.buildConstant(SrcTy, + APInt::getSignMask(SrcEltBits)); + auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask); + auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1); + auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit); + Sign = MIRBuilder.buildSExt(DstTy, Sign); + + auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF); + auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask); + auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000); + + auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K); + R = MIRBuilder.buildZExt(DstTy, R); + + auto Bias = MIRBuilder.buildConstant(SrcTy, 127); + auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias); + auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit); + auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent); + + auto Shl = MIRBuilder.buildShl(DstTy, R, SubExponent); + auto Srl = MIRBuilder.buildLShr(DstTy, R, ExponentSub); + + const LLT S1 = LLT::scalar(1); + auto CmpGt = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, + S1, Exponent, ExponentLoBit); + + R = MIRBuilder.buildSelect(DstTy, CmpGt, Shl, Srl); + + auto XorSign = MIRBuilder.buildXor(DstTy, R, Sign); + auto Ret = MIRBuilder.buildSub(DstTy, XorSign, Sign); + + auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0); + + auto ExponentLt0 = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, + S1, Exponent, ZeroSrcTy); + + auto ZeroDstTy = MIRBuilder.buildConstant(DstTy, 0); + MIRBuilder.buildSelect(Dst, ExponentLt0, ZeroDstTy, Ret); + + MI.eraseFromParent(); + return Legalized; +} + +// f64 -> f16 conversion using round-to-nearest-even rounding mode. +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFPTRUNC_F64_TO_F16(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + + if (MRI.getType(Src).isVector()) // TODO: Handle vectors directly. + return UnableToLegalize; + + const unsigned ExpMask = 0x7ff; + const unsigned ExpBiasf64 = 1023; + const unsigned ExpBiasf16 = 15; + const LLT S32 = LLT::scalar(32); + const LLT S1 = LLT::scalar(1); + + auto Unmerge = MIRBuilder.buildUnmerge(S32, Src); + Register U = Unmerge.getReg(0); + Register UH = Unmerge.getReg(1); + + auto E = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 20)); + E = MIRBuilder.buildAnd(S32, E, MIRBuilder.buildConstant(S32, ExpMask)); + + // Subtract the fp64 exponent bias (1023) to get the real exponent and + // add the f16 bias (15) to get the biased exponent for the f16 format. 
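  // E.g. for Src = 1.0 the f64 exponent field is 1023, so E becomes
  // 1023 + (-1023 + 15) = 15, the f16 exponent field of 1.0; for the
  // largest f16 normal, 65504.0, the field is 1038 and E becomes 30.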
+ E = MIRBuilder.buildAdd( + S32, E, MIRBuilder.buildConstant(S32, -ExpBiasf64 + ExpBiasf16)); + + auto M = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 8)); + M = MIRBuilder.buildAnd(S32, M, MIRBuilder.buildConstant(S32, 0xffe)); + + auto MaskedSig = MIRBuilder.buildAnd(S32, UH, + MIRBuilder.buildConstant(S32, 0x1ff)); + MaskedSig = MIRBuilder.buildOr(S32, MaskedSig, U); + + auto Zero = MIRBuilder.buildConstant(S32, 0); + auto SigCmpNE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, MaskedSig, Zero); + auto Lo40Set = MIRBuilder.buildZExt(S32, SigCmpNE0); + M = MIRBuilder.buildOr(S32, M, Lo40Set); + + // (M != 0 ? 0x0200 : 0) | 0x7c00; + auto Bits0x200 = MIRBuilder.buildConstant(S32, 0x0200); + auto CmpM_NE0 = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, M, Zero); + auto SelectCC = MIRBuilder.buildSelect(S32, CmpM_NE0, Bits0x200, Zero); + + auto Bits0x7c00 = MIRBuilder.buildConstant(S32, 0x7c00); + auto I = MIRBuilder.buildOr(S32, SelectCC, Bits0x7c00); + + // N = M | (E << 12); + auto EShl12 = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 12)); + auto N = MIRBuilder.buildOr(S32, M, EShl12); + + // B = clamp(1-E, 0, 13); + auto One = MIRBuilder.buildConstant(S32, 1); + auto OneSubExp = MIRBuilder.buildSub(S32, One, E); + auto B = MIRBuilder.buildSMax(S32, OneSubExp, Zero); + B = MIRBuilder.buildSMin(S32, B, MIRBuilder.buildConstant(S32, 13)); + + auto SigSetHigh = MIRBuilder.buildOr(S32, M, + MIRBuilder.buildConstant(S32, 0x1000)); + + auto D = MIRBuilder.buildLShr(S32, SigSetHigh, B); + auto D0 = MIRBuilder.buildShl(S32, D, B); + + auto D0_NE_SigSetHigh = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, + D0, SigSetHigh); + auto D1 = MIRBuilder.buildZExt(S32, D0_NE_SigSetHigh); + D = MIRBuilder.buildOr(S32, D, D1); + + auto CmpELtOne = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, E, One); + auto V = MIRBuilder.buildSelect(S32, CmpELtOne, D, N); + + auto VLow3 = MIRBuilder.buildAnd(S32, V, MIRBuilder.buildConstant(S32, 7)); + V = MIRBuilder.buildLShr(S32, V, MIRBuilder.buildConstant(S32, 2)); + + auto VLow3Eq3 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, VLow3, + MIRBuilder.buildConstant(S32, 3)); + auto V0 = MIRBuilder.buildZExt(S32, VLow3Eq3); + + auto VLow3Gt5 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, VLow3, + MIRBuilder.buildConstant(S32, 5)); + auto V1 = MIRBuilder.buildZExt(S32, VLow3Gt5); + + V1 = MIRBuilder.buildOr(S32, V0, V1); + V = MIRBuilder.buildAdd(S32, V, V1); + + auto CmpEGt30 = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, S1, + E, MIRBuilder.buildConstant(S32, 30)); + V = MIRBuilder.buildSelect(S32, CmpEGt30, + MIRBuilder.buildConstant(S32, 0x7c00), V); + + auto CmpEGt1039 = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, + E, MIRBuilder.buildConstant(S32, 1039)); + V = MIRBuilder.buildSelect(S32, CmpEGt1039, I, V); + + // Extract the sign bit. 
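  // The f64 sign is bit 31 of the high word UH (bit 63 of the double);
  // shifting right by 16 and masking with 0x8000 moves it to bit 15, the
  // f16 sign position. E.g. for Src = -0.0, UH = 0x80000000 and the
  // computed Sign is 0x8000.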
+ auto Sign = MIRBuilder.buildLShr(S32, UH, MIRBuilder.buildConstant(S32, 16)); + Sign = MIRBuilder.buildAnd(S32, Sign, MIRBuilder.buildConstant(S32, 0x8000)); + + // Insert the sign bit + V = MIRBuilder.buildOr(S32, Sign, V); + + MIRBuilder.buildTrunc(Dst, V); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPTRUNC(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - const LLT S64 = LLT::scalar(64); - const LLT S16 = LLT::scalar(16); - - if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64) - return lowerFPTRUNC_F64_TO_F16(MI); - - return UnableToLegalize; -} - + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + const LLT S64 = LLT::scalar(64); + const LLT S16 = LLT::scalar(16); + + if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64) + return lowerFPTRUNC_F64_TO_F16(MI); + + return UnableToLegalize; +} + // TODO: If RHS is a constant SelectionDAGBuilder expands this into a // multiplication tree. LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPOWI(MachineInstr &MI) { @@ -5398,259 +5398,259 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerFPOWI(MachineInstr &MI) { return Legalized; } -static CmpInst::Predicate minMaxToCompare(unsigned Opc) { - switch (Opc) { - case TargetOpcode::G_SMIN: - return CmpInst::ICMP_SLT; - case TargetOpcode::G_SMAX: - return CmpInst::ICMP_SGT; - case TargetOpcode::G_UMIN: - return CmpInst::ICMP_ULT; - case TargetOpcode::G_UMAX: - return CmpInst::ICMP_UGT; - default: - llvm_unreachable("not in integer min/max"); - } -} - +static CmpInst::Predicate minMaxToCompare(unsigned Opc) { + switch (Opc) { + case TargetOpcode::G_SMIN: + return CmpInst::ICMP_SLT; + case TargetOpcode::G_SMAX: + return CmpInst::ICMP_SGT; + case TargetOpcode::G_UMIN: + return CmpInst::ICMP_ULT; + case TargetOpcode::G_UMAX: + return CmpInst::ICMP_UGT; + default: + llvm_unreachable("not in integer min/max"); + } +} + LegalizerHelper::LegalizeResult LegalizerHelper::lowerMinMax(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src0 = MI.getOperand(1).getReg(); - Register Src1 = MI.getOperand(2).getReg(); - - const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode()); - LLT CmpType = MRI.getType(Dst).changeElementSize(1); - - auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1); - MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); + + const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode()); + LLT CmpType = MRI.getType(Dst).changeElementSize(1); + + auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1); + MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerFCopySign(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src0 = MI.getOperand(1).getReg(); - Register Src1 = MI.getOperand(2).getReg(); - - const LLT Src0Ty = MRI.getType(Src0); - const LLT Src1Ty = MRI.getType(Src1); - - const int Src0Size = Src0Ty.getScalarSizeInBits(); - const int Src1Size = Src1Ty.getScalarSizeInBits(); - - auto SignBitMask = 
MIRBuilder.buildConstant( - Src0Ty, APInt::getSignMask(Src0Size)); - - auto NotSignBitMask = MIRBuilder.buildConstant( - Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1)); - - auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask); - MachineInstr *Or; - - if (Src0Ty == Src1Ty) { - auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask); - Or = MIRBuilder.buildOr(Dst, And0, And1); - } else if (Src0Size > Src1Size) { - auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size); - auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1); - auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt); - auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask); - Or = MIRBuilder.buildOr(Dst, And0, And1); - } else { - auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size); - auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt); - auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift); - auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask); - Or = MIRBuilder.buildOr(Dst, And0, And1); - } - - // Be careful about setting nsz/nnan/ninf on every instruction, since the - // constants are a nan and -0.0, but the final result should preserve - // everything. - if (unsigned Flags = MI.getFlags()) - Or->setFlags(Flags); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) { - unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ? - TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE; - - Register Dst = MI.getOperand(0).getReg(); - Register Src0 = MI.getOperand(1).getReg(); - Register Src1 = MI.getOperand(2).getReg(); - LLT Ty = MRI.getType(Dst); - - if (!MI.getFlag(MachineInstr::FmNoNans)) { - // Insert canonicalizes if it's possible we need to quiet to get correct - // sNaN behavior. - - // Note this must be done here, and not as an optimization combine in the - // absence of a dedicate quiet-snan instruction as we're using an - // omni-purpose G_FCANONICALIZE. - if (!isKnownNeverSNaN(Src0, MRI)) - Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0); - - if (!isKnownNeverSNaN(Src1, MRI)) - Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0); - } - - // If there are no nans, it's safe to simply replace this with the non-IEEE - // version. - MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags()); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) { - // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c - Register DstReg = MI.getOperand(0).getReg(); - LLT Ty = MRI.getType(DstReg); - unsigned Flags = MI.getFlags(); - - auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2), - Flags); - MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) { - Register DstReg = MI.getOperand(0).getReg(); - Register X = MI.getOperand(1).getReg(); - const unsigned Flags = MI.getFlags(); - const LLT Ty = MRI.getType(DstReg); - const LLT CondTy = Ty.changeElementSize(1); - - // round(x) => - // t = trunc(x); - // d = fabs(x - t); - // o = copysign(1.0f, x); - // return t + (d >= 0.5 ? 
o : 0.0); - - auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags); - - auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags); - auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags); - auto Zero = MIRBuilder.buildFConstant(Ty, 0.0); - auto One = MIRBuilder.buildFConstant(Ty, 1.0); - auto Half = MIRBuilder.buildFConstant(Ty, 0.5); - auto SignOne = MIRBuilder.buildFCopysign(Ty, One, X); - - auto Cmp = MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half, - Flags); - auto Sel = MIRBuilder.buildSelect(Ty, Cmp, SignOne, Zero, Flags); - - MIRBuilder.buildFAdd(DstReg, T, Sel, Flags); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerFFloor(MachineInstr &MI) { - Register DstReg = MI.getOperand(0).getReg(); - Register SrcReg = MI.getOperand(1).getReg(); - unsigned Flags = MI.getFlags(); - LLT Ty = MRI.getType(DstReg); - const LLT CondTy = Ty.changeElementSize(1); - - // result = trunc(src); - // if (src < 0.0 && src != result) - // result += -1.0. - - auto Trunc = MIRBuilder.buildIntrinsicTrunc(Ty, SrcReg, Flags); - auto Zero = MIRBuilder.buildFConstant(Ty, 0.0); - - auto Lt0 = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, CondTy, - SrcReg, Zero, Flags); - auto NeTrunc = MIRBuilder.buildFCmp(CmpInst::FCMP_ONE, CondTy, - SrcReg, Trunc, Flags); - auto And = MIRBuilder.buildAnd(CondTy, Lt0, NeTrunc); - auto AddVal = MIRBuilder.buildSITOFP(Ty, And); - - MIRBuilder.buildFAdd(DstReg, Trunc, AddVal, Flags); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerMergeValues(MachineInstr &MI) { - const unsigned NumOps = MI.getNumOperands(); - Register DstReg = MI.getOperand(0).getReg(); - Register Src0Reg = MI.getOperand(1).getReg(); - LLT DstTy = MRI.getType(DstReg); - LLT SrcTy = MRI.getType(Src0Reg); - unsigned PartSize = SrcTy.getSizeInBits(); - - LLT WideTy = LLT::scalar(DstTy.getSizeInBits()); - Register ResultReg = MIRBuilder.buildZExt(WideTy, Src0Reg).getReg(0); - - for (unsigned I = 2; I != NumOps; ++I) { - const unsigned Offset = (I - 1) * PartSize; - - Register SrcReg = MI.getOperand(I).getReg(); - auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); - - Register NextResult = I + 1 == NumOps && WideTy == DstTy ? DstReg : - MRI.createGenericVirtualRegister(WideTy); - - auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); - auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); - MIRBuilder.buildOr(NextResult, ResultReg, Shl); - ResultReg = NextResult; - } - - if (DstTy.isPointer()) { - if (MIRBuilder.getDataLayout().isNonIntegralAddressSpace( - DstTy.getAddressSpace())) { - LLVM_DEBUG(dbgs() << "Not casting nonintegral address space\n"); - return UnableToLegalize; - } - - MIRBuilder.buildIntToPtr(DstReg, ResultReg); - } - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) { - const unsigned NumDst = MI.getNumOperands() - 1; - Register SrcReg = MI.getOperand(NumDst).getReg(); - Register Dst0Reg = MI.getOperand(0).getReg(); - LLT DstTy = MRI.getType(Dst0Reg); - if (DstTy.isPointer()) - return UnableToLegalize; // TODO - - SrcReg = coerceToScalar(SrcReg); - if (!SrcReg) - return UnableToLegalize; - - // Expand scalarizing unmerge as bitcast to integer and shift. 
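  // E.g. (illustrative MIR; register names are placeholders):
  //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x:_(s64)
  // expands to
  //   %lo:_(s32)  = G_TRUNC %x:_(s64)
  //   %c32:_(s64) = G_CONSTANT i64 32
  //   %sh:_(s64)  = G_LSHR %x:_(s64), %c32:_(s64)
  //   %hi:_(s32)  = G_TRUNC %sh:_(s64)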
- LLT IntTy = MRI.getType(SrcReg); - - MIRBuilder.buildTrunc(Dst0Reg, SrcReg); - - const unsigned DstSize = DstTy.getSizeInBits(); - unsigned Offset = DstSize; - for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) { - auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset); - auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt); - MIRBuilder.buildTrunc(MI.getOperand(I), Shift); - } - - MI.eraseFromParent(); - return Legalized; -} - + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); + + const LLT Src0Ty = MRI.getType(Src0); + const LLT Src1Ty = MRI.getType(Src1); + + const int Src0Size = Src0Ty.getScalarSizeInBits(); + const int Src1Size = Src1Ty.getScalarSizeInBits(); + + auto SignBitMask = MIRBuilder.buildConstant( + Src0Ty, APInt::getSignMask(Src0Size)); + + auto NotSignBitMask = MIRBuilder.buildConstant( + Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1)); + + auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask); + MachineInstr *Or; + + if (Src0Ty == Src1Ty) { + auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } else if (Src0Size > Src1Size) { + auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size); + auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1); + auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt); + auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } else { + auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size); + auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt); + auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift); + auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask); + Or = MIRBuilder.buildOr(Dst, And0, And1); + } + + // Be careful about setting nsz/nnan/ninf on every instruction, since the + // constants are a nan and -0.0, but the final result should preserve + // everything. + if (unsigned Flags = MI.getFlags()) + Or->setFlags(Flags); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) { + unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ? + TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE; + + Register Dst = MI.getOperand(0).getReg(); + Register Src0 = MI.getOperand(1).getReg(); + Register Src1 = MI.getOperand(2).getReg(); + LLT Ty = MRI.getType(Dst); + + if (!MI.getFlag(MachineInstr::FmNoNans)) { + // Insert canonicalizes if it's possible we need to quiet to get correct + // sNaN behavior. + + // Note this must be done here, and not as an optimization combine in the + // absence of a dedicate quiet-snan instruction as we're using an + // omni-purpose G_FCANONICALIZE. + if (!isKnownNeverSNaN(Src0, MRI)) + Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0); + + if (!isKnownNeverSNaN(Src1, MRI)) + Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0); + } + + // If there are no nans, it's safe to simply replace this with the non-IEEE + // version. 
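  // Sketch of the general path (illustrative; register names are
  // placeholders):
  //   %dst:_(s32) = G_FMINNUM %a, %b
  // becomes
  //   %qa:_(s32)  = G_FCANONICALIZE %a
  //   %qb:_(s32)  = G_FCANONICALIZE %b
  //   %dst:_(s32) = G_FMINNUM_IEEE %qa, %qb
  // because G_FMINNUM_IEEE is only expected to match fminnum semantics
  // once any signaling NaN inputs have been quieted.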
+ MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags()); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) { + // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c + Register DstReg = MI.getOperand(0).getReg(); + LLT Ty = MRI.getType(DstReg); + unsigned Flags = MI.getFlags(); + + auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2), + Flags); + MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) { + Register DstReg = MI.getOperand(0).getReg(); + Register X = MI.getOperand(1).getReg(); + const unsigned Flags = MI.getFlags(); + const LLT Ty = MRI.getType(DstReg); + const LLT CondTy = Ty.changeElementSize(1); + + // round(x) => + // t = trunc(x); + // d = fabs(x - t); + // o = copysign(1.0f, x); + // return t + (d >= 0.5 ? o : 0.0); + + auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags); + + auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags); + auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags); + auto Zero = MIRBuilder.buildFConstant(Ty, 0.0); + auto One = MIRBuilder.buildFConstant(Ty, 1.0); + auto Half = MIRBuilder.buildFConstant(Ty, 0.5); + auto SignOne = MIRBuilder.buildFCopysign(Ty, One, X); + + auto Cmp = MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half, + Flags); + auto Sel = MIRBuilder.buildSelect(Ty, Cmp, SignOne, Zero, Flags); + + MIRBuilder.buildFAdd(DstReg, T, Sel, Flags); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerFFloor(MachineInstr &MI) { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + unsigned Flags = MI.getFlags(); + LLT Ty = MRI.getType(DstReg); + const LLT CondTy = Ty.changeElementSize(1); + + // result = trunc(src); + // if (src < 0.0 && src != result) + // result += -1.0. + + auto Trunc = MIRBuilder.buildIntrinsicTrunc(Ty, SrcReg, Flags); + auto Zero = MIRBuilder.buildFConstant(Ty, 0.0); + + auto Lt0 = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, CondTy, + SrcReg, Zero, Flags); + auto NeTrunc = MIRBuilder.buildFCmp(CmpInst::FCMP_ONE, CondTy, + SrcReg, Trunc, Flags); + auto And = MIRBuilder.buildAnd(CondTy, Lt0, NeTrunc); + auto AddVal = MIRBuilder.buildSITOFP(Ty, And); + + MIRBuilder.buildFAdd(DstReg, Trunc, AddVal, Flags); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerMergeValues(MachineInstr &MI) { + const unsigned NumOps = MI.getNumOperands(); + Register DstReg = MI.getOperand(0).getReg(); + Register Src0Reg = MI.getOperand(1).getReg(); + LLT DstTy = MRI.getType(DstReg); + LLT SrcTy = MRI.getType(Src0Reg); + unsigned PartSize = SrcTy.getSizeInBits(); + + LLT WideTy = LLT::scalar(DstTy.getSizeInBits()); + Register ResultReg = MIRBuilder.buildZExt(WideTy, Src0Reg).getReg(0); + + for (unsigned I = 2; I != NumOps; ++I) { + const unsigned Offset = (I - 1) * PartSize; + + Register SrcReg = MI.getOperand(I).getReg(); + auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); + + Register NextResult = I + 1 == NumOps && WideTy == DstTy ? 
DstReg : + MRI.createGenericVirtualRegister(WideTy); + + auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); + auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); + MIRBuilder.buildOr(NextResult, ResultReg, Shl); + ResultReg = NextResult; + } + + if (DstTy.isPointer()) { + if (MIRBuilder.getDataLayout().isNonIntegralAddressSpace( + DstTy.getAddressSpace())) { + LLVM_DEBUG(dbgs() << "Not casting nonintegral address space\n"); + return UnableToLegalize; + } + + MIRBuilder.buildIntToPtr(DstReg, ResultReg); + } + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) { + const unsigned NumDst = MI.getNumOperands() - 1; + Register SrcReg = MI.getOperand(NumDst).getReg(); + Register Dst0Reg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(Dst0Reg); + if (DstTy.isPointer()) + return UnableToLegalize; // TODO + + SrcReg = coerceToScalar(SrcReg); + if (!SrcReg) + return UnableToLegalize; + + // Expand scalarizing unmerge as bitcast to integer and shift. + LLT IntTy = MRI.getType(SrcReg); + + MIRBuilder.buildTrunc(Dst0Reg, SrcReg); + + const unsigned DstSize = DstTy.getSizeInBits(); + unsigned Offset = DstSize; + for (unsigned I = 1; I != NumDst; ++I, Offset += DstSize) { + auto ShiftAmt = MIRBuilder.buildConstant(IntTy, Offset); + auto Shift = MIRBuilder.buildLShr(IntTy, SrcReg, ShiftAmt); + MIRBuilder.buildTrunc(MI.getOperand(I), Shift); + } + + MI.eraseFromParent(); + return Legalized; +} + /// Lower a vector extract or insert by writing the vector to a stack temporary /// and reloading the element or vector. /// @@ -5661,7 +5661,7 @@ LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) { /// %idx = clamp(%idx, %vec.getNumElements()) /// %element_ptr = G_PTR_ADD %stack_temp, %idx /// %dst = G_LOAD %element_ptr -LegalizerHelper::LegalizeResult +LegalizerHelper::LegalizeResult LegalizerHelper::lowerExtractInsertVectorElt(MachineInstr &MI) { Register DstReg = MI.getOperand(0).getReg(); Register SrcVec = MI.getOperand(1).getReg(); @@ -5717,220 +5717,220 @@ LegalizerHelper::lowerExtractInsertVectorElt(MachineInstr &MI) { } LegalizerHelper::LegalizeResult -LegalizerHelper::lowerShuffleVector(MachineInstr &MI) { - Register DstReg = MI.getOperand(0).getReg(); - Register Src0Reg = MI.getOperand(1).getReg(); - Register Src1Reg = MI.getOperand(2).getReg(); - LLT Src0Ty = MRI.getType(Src0Reg); - LLT DstTy = MRI.getType(DstReg); - LLT IdxTy = LLT::scalar(32); - - ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); - - if (DstTy.isScalar()) { - if (Src0Ty.isVector()) - return UnableToLegalize; - - // This is just a SELECT. - assert(Mask.size() == 1 && "Expected a single mask element"); - Register Val; - if (Mask[0] < 0 || Mask[0] > 1) - Val = MIRBuilder.buildUndef(DstTy).getReg(0); - else - Val = Mask[0] == 0 ? Src0Reg : Src1Reg; - MIRBuilder.buildCopy(DstReg, Val); - MI.eraseFromParent(); - return Legalized; - } - - Register Undef; - SmallVector<Register, 32> BuildVec; - LLT EltTy = DstTy.getElementType(); - - for (int Idx : Mask) { - if (Idx < 0) { - if (!Undef.isValid()) - Undef = MIRBuilder.buildUndef(EltTy).getReg(0); - BuildVec.push_back(Undef); - continue; - } - - if (Src0Ty.isScalar()) { - BuildVec.push_back(Idx == 0 ? Src0Reg : Src1Reg); - } else { - int NumElts = Src0Ty.getNumElements(); - Register SrcVec = Idx < NumElts ? Src0Reg : Src1Reg; - int ExtractIdx = Idx < NumElts ? 
Idx : Idx - NumElts; - auto IdxK = MIRBuilder.buildConstant(IdxTy, ExtractIdx); - auto Extract = MIRBuilder.buildExtractVectorElement(EltTy, SrcVec, IdxK); - BuildVec.push_back(Extract.getReg(0)); - } - } - - MIRBuilder.buildBuildVector(DstReg, BuildVec); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) { - const auto &MF = *MI.getMF(); - const auto &TFI = *MF.getSubtarget().getFrameLowering(); - if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp) - return UnableToLegalize; - - Register Dst = MI.getOperand(0).getReg(); - Register AllocSize = MI.getOperand(1).getReg(); - Align Alignment = assumeAligned(MI.getOperand(2).getImm()); - - LLT PtrTy = MRI.getType(Dst); - LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); - - Register SPReg = TLI.getStackPointerRegisterToSaveRestore(); - auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg); - SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp); - - // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't - // have to generate an extra instruction to negate the alloc and then use - // G_PTR_ADD to add the negative offset. - auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize); - if (Alignment > Align(1)) { - APInt AlignMask(IntPtrTy.getSizeInBits(), Alignment.value(), true); - AlignMask.negate(); - auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask); - Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst); - } - - SPTmp = MIRBuilder.buildCast(PtrTy, Alloc); - MIRBuilder.buildCopy(SPReg, SPTmp); - MIRBuilder.buildCopy(Dst, SPTmp); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerExtract(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - unsigned Offset = MI.getOperand(2).getImm(); - - LLT DstTy = MRI.getType(Dst); - LLT SrcTy = MRI.getType(Src); - - if (DstTy.isScalar() && - (SrcTy.isScalar() || - (SrcTy.isVector() && DstTy == SrcTy.getElementType()))) { - LLT SrcIntTy = SrcTy; - if (!SrcTy.isScalar()) { - SrcIntTy = LLT::scalar(SrcTy.getSizeInBits()); - Src = MIRBuilder.buildBitcast(SrcIntTy, Src).getReg(0); - } - - if (Offset == 0) - MIRBuilder.buildTrunc(Dst, Src); - else { - auto ShiftAmt = MIRBuilder.buildConstant(SrcIntTy, Offset); - auto Shr = MIRBuilder.buildLShr(SrcIntTy, Src, ShiftAmt); - MIRBuilder.buildTrunc(Dst, Shr); - } - - MI.eraseFromParent(); - return Legalized; - } - - return UnableToLegalize; -} - -LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - Register InsertSrc = MI.getOperand(2).getReg(); - uint64_t Offset = MI.getOperand(3).getImm(); - - LLT DstTy = MRI.getType(Src); - LLT InsertTy = MRI.getType(InsertSrc); - - if (InsertTy.isVector() || - (DstTy.isVector() && DstTy.getElementType() != InsertTy)) - return UnableToLegalize; - - const DataLayout &DL = MIRBuilder.getDataLayout(); - if ((DstTy.isPointer() && - DL.isNonIntegralAddressSpace(DstTy.getAddressSpace())) || - (InsertTy.isPointer() && - DL.isNonIntegralAddressSpace(InsertTy.getAddressSpace()))) { - LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n"); - return UnableToLegalize; - } - - LLT IntDstTy = DstTy; - - if (!DstTy.isScalar()) { - IntDstTy = LLT::scalar(DstTy.getSizeInBits()); - Src = MIRBuilder.buildCast(IntDstTy, Src).getReg(0); - } - - if (!InsertTy.isScalar()) { - const 
LLT IntInsertTy = LLT::scalar(InsertTy.getSizeInBits()); - InsertSrc = MIRBuilder.buildPtrToInt(IntInsertTy, InsertSrc).getReg(0); - } - - Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0); - if (Offset != 0) { - auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset); - ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0); - } - - APInt MaskVal = APInt::getBitsSetWithWrap( - DstTy.getSizeInBits(), Offset + InsertTy.getSizeInBits(), Offset); - - auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal); - auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask); - auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc); - - MIRBuilder.buildCast(Dst, Or); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) { - Register Dst0 = MI.getOperand(0).getReg(); - Register Dst1 = MI.getOperand(1).getReg(); - Register LHS = MI.getOperand(2).getReg(); - Register RHS = MI.getOperand(3).getReg(); - const bool IsAdd = MI.getOpcode() == TargetOpcode::G_SADDO; - - LLT Ty = MRI.getType(Dst0); - LLT BoolTy = MRI.getType(Dst1); - - if (IsAdd) - MIRBuilder.buildAdd(Dst0, LHS, RHS); - else - MIRBuilder.buildSub(Dst0, LHS, RHS); - - // TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow. - - auto Zero = MIRBuilder.buildConstant(Ty, 0); - - // For an addition, the result should be less than one of the operands (LHS) - // if and only if the other operand (RHS) is negative, otherwise there will - // be overflow. - // For a subtraction, the result should be less than one of the operands - // (LHS) if and only if the other operand (RHS) is (non-zero) positive, - // otherwise there will be overflow. - auto ResultLowerThanLHS = - MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, Dst0, LHS); - auto ConditionRHS = MIRBuilder.buildICmp( - IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero); - - MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS); - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult +LegalizerHelper::lowerShuffleVector(MachineInstr &MI) { + Register DstReg = MI.getOperand(0).getReg(); + Register Src0Reg = MI.getOperand(1).getReg(); + Register Src1Reg = MI.getOperand(2).getReg(); + LLT Src0Ty = MRI.getType(Src0Reg); + LLT DstTy = MRI.getType(DstReg); + LLT IdxTy = LLT::scalar(32); + + ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); + + if (DstTy.isScalar()) { + if (Src0Ty.isVector()) + return UnableToLegalize; + + // This is just a SELECT. + assert(Mask.size() == 1 && "Expected a single mask element"); + Register Val; + if (Mask[0] < 0 || Mask[0] > 1) + Val = MIRBuilder.buildUndef(DstTy).getReg(0); + else + Val = Mask[0] == 0 ? Src0Reg : Src1Reg; + MIRBuilder.buildCopy(DstReg, Val); + MI.eraseFromParent(); + return Legalized; + } + + Register Undef; + SmallVector<Register, 32> BuildVec; + LLT EltTy = DstTy.getElementType(); + + for (int Idx : Mask) { + if (Idx < 0) { + if (!Undef.isValid()) + Undef = MIRBuilder.buildUndef(EltTy).getReg(0); + BuildVec.push_back(Undef); + continue; + } + + if (Src0Ty.isScalar()) { + BuildVec.push_back(Idx == 0 ? Src0Reg : Src1Reg); + } else { + int NumElts = Src0Ty.getNumElements(); + Register SrcVec = Idx < NumElts ? Src0Reg : Src1Reg; + int ExtractIdx = Idx < NumElts ? 
Idx : Idx - NumElts; + auto IdxK = MIRBuilder.buildConstant(IdxTy, ExtractIdx); + auto Extract = MIRBuilder.buildExtractVectorElement(EltTy, SrcVec, IdxK); + BuildVec.push_back(Extract.getReg(0)); + } + } + + MIRBuilder.buildBuildVector(DstReg, BuildVec); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) { + const auto &MF = *MI.getMF(); + const auto &TFI = *MF.getSubtarget().getFrameLowering(); + if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp) + return UnableToLegalize; + + Register Dst = MI.getOperand(0).getReg(); + Register AllocSize = MI.getOperand(1).getReg(); + Align Alignment = assumeAligned(MI.getOperand(2).getImm()); + + LLT PtrTy = MRI.getType(Dst); + LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); + + Register SPReg = TLI.getStackPointerRegisterToSaveRestore(); + auto SPTmp = MIRBuilder.buildCopy(PtrTy, SPReg); + SPTmp = MIRBuilder.buildCast(IntPtrTy, SPTmp); + + // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't + // have to generate an extra instruction to negate the alloc and then use + // G_PTR_ADD to add the negative offset. + auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize); + if (Alignment > Align(1)) { + APInt AlignMask(IntPtrTy.getSizeInBits(), Alignment.value(), true); + AlignMask.negate(); + auto AlignCst = MIRBuilder.buildConstant(IntPtrTy, AlignMask); + Alloc = MIRBuilder.buildAnd(IntPtrTy, Alloc, AlignCst); + } + + SPTmp = MIRBuilder.buildCast(PtrTy, Alloc); + MIRBuilder.buildCopy(SPReg, SPTmp); + MIRBuilder.buildCopy(Dst, SPTmp); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerExtract(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + unsigned Offset = MI.getOperand(2).getImm(); + + LLT DstTy = MRI.getType(Dst); + LLT SrcTy = MRI.getType(Src); + + if (DstTy.isScalar() && + (SrcTy.isScalar() || + (SrcTy.isVector() && DstTy == SrcTy.getElementType()))) { + LLT SrcIntTy = SrcTy; + if (!SrcTy.isScalar()) { + SrcIntTy = LLT::scalar(SrcTy.getSizeInBits()); + Src = MIRBuilder.buildBitcast(SrcIntTy, Src).getReg(0); + } + + if (Offset == 0) + MIRBuilder.buildTrunc(Dst, Src); + else { + auto ShiftAmt = MIRBuilder.buildConstant(SrcIntTy, Offset); + auto Shr = MIRBuilder.buildLShr(SrcIntTy, Src, ShiftAmt); + MIRBuilder.buildTrunc(Dst, Shr); + } + + MI.eraseFromParent(); + return Legalized; + } + + return UnableToLegalize; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + Register InsertSrc = MI.getOperand(2).getReg(); + uint64_t Offset = MI.getOperand(3).getImm(); + + LLT DstTy = MRI.getType(Src); + LLT InsertTy = MRI.getType(InsertSrc); + + if (InsertTy.isVector() || + (DstTy.isVector() && DstTy.getElementType() != InsertTy)) + return UnableToLegalize; + + const DataLayout &DL = MIRBuilder.getDataLayout(); + if ((DstTy.isPointer() && + DL.isNonIntegralAddressSpace(DstTy.getAddressSpace())) || + (InsertTy.isPointer() && + DL.isNonIntegralAddressSpace(InsertTy.getAddressSpace()))) { + LLVM_DEBUG(dbgs() << "Not casting non-integral address space integer\n"); + return UnableToLegalize; + } + + LLT IntDstTy = DstTy; + + if (!DstTy.isScalar()) { + IntDstTy = LLT::scalar(DstTy.getSizeInBits()); + Src = MIRBuilder.buildCast(IntDstTy, Src).getReg(0); + } + + if (!InsertTy.isScalar()) { + const 
LLT IntInsertTy = LLT::scalar(InsertTy.getSizeInBits()); + InsertSrc = MIRBuilder.buildPtrToInt(IntInsertTy, InsertSrc).getReg(0); + } + + Register ExtInsSrc = MIRBuilder.buildZExt(IntDstTy, InsertSrc).getReg(0); + if (Offset != 0) { + auto ShiftAmt = MIRBuilder.buildConstant(IntDstTy, Offset); + ExtInsSrc = MIRBuilder.buildShl(IntDstTy, ExtInsSrc, ShiftAmt).getReg(0); + } + + APInt MaskVal = APInt::getBitsSetWithWrap( + DstTy.getSizeInBits(), Offset + InsertTy.getSizeInBits(), Offset); + + auto Mask = MIRBuilder.buildConstant(IntDstTy, MaskVal); + auto MaskedSrc = MIRBuilder.buildAnd(IntDstTy, Src, Mask); + auto Or = MIRBuilder.buildOr(IntDstTy, MaskedSrc, ExtInsSrc); + + MIRBuilder.buildCast(Dst, Or); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) { + Register Dst0 = MI.getOperand(0).getReg(); + Register Dst1 = MI.getOperand(1).getReg(); + Register LHS = MI.getOperand(2).getReg(); + Register RHS = MI.getOperand(3).getReg(); + const bool IsAdd = MI.getOpcode() == TargetOpcode::G_SADDO; + + LLT Ty = MRI.getType(Dst0); + LLT BoolTy = MRI.getType(Dst1); + + if (IsAdd) + MIRBuilder.buildAdd(Dst0, LHS, RHS); + else + MIRBuilder.buildSub(Dst0, LHS, RHS); + + // TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow. + + auto Zero = MIRBuilder.buildConstant(Ty, 0); + + // For an addition, the result should be less than one of the operands (LHS) + // if and only if the other operand (RHS) is negative, otherwise there will + // be overflow. + // For a subtraction, the result should be less than one of the operands + // (LHS) if and only if the other operand (RHS) is (non-zero) positive, + // otherwise there will be overflow. + auto ResultLowerThanLHS = + MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, Dst0, LHS); + auto ConditionRHS = MIRBuilder.buildICmp( + IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero); + + MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS); + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult LegalizerHelper::lowerAddSubSatToMinMax(MachineInstr &MI) { Register Res = MI.getOperand(0).getReg(); Register LHS = MI.getOperand(1).getReg(); @@ -6110,107 +6110,107 @@ LegalizerHelper::lowerShlSat(MachineInstr &MI) { } LegalizerHelper::LegalizeResult -LegalizerHelper::lowerBswap(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - const LLT Ty = MRI.getType(Src); - unsigned SizeInBytes = (Ty.getScalarSizeInBits() + 7) / 8; - unsigned BaseShiftAmt = (SizeInBytes - 1) * 8; - - // Swap most and least significant byte, set remaining bytes in Res to zero. - auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt); - auto LSByteShiftedLeft = MIRBuilder.buildShl(Ty, Src, ShiftAmt); - auto MSByteShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt); - auto Res = MIRBuilder.buildOr(Ty, MSByteShiftedRight, LSByteShiftedLeft); - - // Set i-th high/low byte in Res to i-th low/high byte from Src. - for (unsigned i = 1; i < SizeInBytes / 2; ++i) { - // AND with Mask leaves byte i unchanged and sets remaining bytes to 0. - APInt APMask(SizeInBytes * 8, 0xFF << (i * 8)); - auto Mask = MIRBuilder.buildConstant(Ty, APMask); - auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt - 16 * i); - // Low byte shifted left to place of high byte: (Src & Mask) << ShiftAmt. 
- auto LoByte = MIRBuilder.buildAnd(Ty, Src, Mask); - auto LoShiftedLeft = MIRBuilder.buildShl(Ty, LoByte, ShiftAmt); - Res = MIRBuilder.buildOr(Ty, Res, LoShiftedLeft); - // High byte shifted right to place of low byte: (Src >> ShiftAmt) & Mask. - auto SrcShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt); - auto HiShiftedRight = MIRBuilder.buildAnd(Ty, SrcShiftedRight, Mask); - Res = MIRBuilder.buildOr(Ty, Res, HiShiftedRight); - } - Res.getInstr()->getOperand(0).setReg(Dst); - - MI.eraseFromParent(); - return Legalized; -} - -//{ (Src & Mask) >> N } | { (Src << N) & Mask } -static MachineInstrBuilder SwapN(unsigned N, DstOp Dst, MachineIRBuilder &B, - MachineInstrBuilder Src, APInt Mask) { - const LLT Ty = Dst.getLLTTy(*B.getMRI()); - MachineInstrBuilder C_N = B.buildConstant(Ty, N); - MachineInstrBuilder MaskLoNTo0 = B.buildConstant(Ty, Mask); - auto LHS = B.buildLShr(Ty, B.buildAnd(Ty, Src, MaskLoNTo0), C_N); - auto RHS = B.buildAnd(Ty, B.buildShl(Ty, Src, C_N), MaskLoNTo0); - return B.buildOr(Dst, LHS, RHS); -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerBitreverse(MachineInstr &MI) { - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - const LLT Ty = MRI.getType(Src); - unsigned Size = Ty.getSizeInBits(); - - MachineInstrBuilder BSWAP = - MIRBuilder.buildInstr(TargetOpcode::G_BSWAP, {Ty}, {Src}); - - // swap high and low 4 bits in 8 bit blocks 7654|3210 -> 3210|7654 - // [(val & 0xF0F0F0F0) >> 4] | [(val & 0x0F0F0F0F) << 4] - // -> [(val & 0xF0F0F0F0) >> 4] | [(val << 4) & 0xF0F0F0F0] - MachineInstrBuilder Swap4 = - SwapN(4, Ty, MIRBuilder, BSWAP, APInt::getSplat(Size, APInt(8, 0xF0))); - - // swap high and low 2 bits in 4 bit blocks 32|10 76|54 -> 10|32 54|76 - // [(val & 0xCCCCCCCC) >> 2] & [(val & 0x33333333) << 2] - // -> [(val & 0xCCCCCCCC) >> 2] & [(val << 2) & 0xCCCCCCCC] - MachineInstrBuilder Swap2 = - SwapN(2, Ty, MIRBuilder, Swap4, APInt::getSplat(Size, APInt(8, 0xCC))); - - // swap high and low 1 bit in 2 bit blocks 1|0 3|2 5|4 7|6 -> 0|1 2|3 4|5 6|7 - // [(val & 0xAAAAAAAA) >> 1] & [(val & 0x55555555) << 1] - // -> [(val & 0xAAAAAAAA) >> 1] & [(val << 1) & 0xAAAAAAAA] - SwapN(1, Dst, MIRBuilder, Swap2, APInt::getSplat(Size, APInt(8, 0xAA))); - - MI.eraseFromParent(); - return Legalized; -} - -LegalizerHelper::LegalizeResult -LegalizerHelper::lowerReadWriteRegister(MachineInstr &MI) { - MachineFunction &MF = MIRBuilder.getMF(); - - bool IsRead = MI.getOpcode() == TargetOpcode::G_READ_REGISTER; - int NameOpIdx = IsRead ? 1 : 0; - int ValRegIndex = IsRead ? 0 : 1; - - Register ValReg = MI.getOperand(ValRegIndex).getReg(); - const LLT Ty = MRI.getType(ValReg); - const MDString *RegStr = cast<MDString>( - cast<MDNode>(MI.getOperand(NameOpIdx).getMetadata())->getOperand(0)); - +LegalizerHelper::lowerBswap(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + const LLT Ty = MRI.getType(Src); + unsigned SizeInBytes = (Ty.getScalarSizeInBits() + 7) / 8; + unsigned BaseShiftAmt = (SizeInBytes - 1) * 8; + + // Swap most and least significant byte, set remaining bytes in Res to zero. + auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt); + auto LSByteShiftedLeft = MIRBuilder.buildShl(Ty, Src, ShiftAmt); + auto MSByteShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt); + auto Res = MIRBuilder.buildOr(Ty, MSByteShiftedRight, LSByteShiftedLeft); + + // Set i-th high/low byte in Res to i-th low/high byte from Src. 
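  // Worked example for s32 (SizeInBytes = 4, BaseShiftAmt = 24) with
  // Src = 0xAABBCCDD: the outer swap above produces Res = 0xDD0000AA,
  // and the single i = 1 iteration below ORs in 0x00CC0000 (low byte
  // moved up) and 0x0000BB00 (high byte moved down), giving 0xDDCCBBAA.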
+ for (unsigned i = 1; i < SizeInBytes / 2; ++i) { + // AND with Mask leaves byte i unchanged and sets remaining bytes to 0. + APInt APMask(SizeInBytes * 8, 0xFF << (i * 8)); + auto Mask = MIRBuilder.buildConstant(Ty, APMask); + auto ShiftAmt = MIRBuilder.buildConstant(Ty, BaseShiftAmt - 16 * i); + // Low byte shifted left to place of high byte: (Src & Mask) << ShiftAmt. + auto LoByte = MIRBuilder.buildAnd(Ty, Src, Mask); + auto LoShiftedLeft = MIRBuilder.buildShl(Ty, LoByte, ShiftAmt); + Res = MIRBuilder.buildOr(Ty, Res, LoShiftedLeft); + // High byte shifted right to place of low byte: (Src >> ShiftAmt) & Mask. + auto SrcShiftedRight = MIRBuilder.buildLShr(Ty, Src, ShiftAmt); + auto HiShiftedRight = MIRBuilder.buildAnd(Ty, SrcShiftedRight, Mask); + Res = MIRBuilder.buildOr(Ty, Res, HiShiftedRight); + } + Res.getInstr()->getOperand(0).setReg(Dst); + + MI.eraseFromParent(); + return Legalized; +} + +//{ (Src & Mask) >> N } | { (Src << N) & Mask } +static MachineInstrBuilder SwapN(unsigned N, DstOp Dst, MachineIRBuilder &B, + MachineInstrBuilder Src, APInt Mask) { + const LLT Ty = Dst.getLLTTy(*B.getMRI()); + MachineInstrBuilder C_N = B.buildConstant(Ty, N); + MachineInstrBuilder MaskLoNTo0 = B.buildConstant(Ty, Mask); + auto LHS = B.buildLShr(Ty, B.buildAnd(Ty, Src, MaskLoNTo0), C_N); + auto RHS = B.buildAnd(Ty, B.buildShl(Ty, Src, C_N), MaskLoNTo0); + return B.buildOr(Dst, LHS, RHS); +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerBitreverse(MachineInstr &MI) { + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + const LLT Ty = MRI.getType(Src); + unsigned Size = Ty.getSizeInBits(); + + MachineInstrBuilder BSWAP = + MIRBuilder.buildInstr(TargetOpcode::G_BSWAP, {Ty}, {Src}); + + // swap high and low 4 bits in 8 bit blocks 7654|3210 -> 3210|7654 + // [(val & 0xF0F0F0F0) >> 4] | [(val & 0x0F0F0F0F) << 4] + // -> [(val & 0xF0F0F0F0) >> 4] | [(val << 4) & 0xF0F0F0F0] + MachineInstrBuilder Swap4 = + SwapN(4, Ty, MIRBuilder, BSWAP, APInt::getSplat(Size, APInt(8, 0xF0))); + + // swap high and low 2 bits in 4 bit blocks 32|10 76|54 -> 10|32 54|76 + // [(val & 0xCCCCCCCC) >> 2] & [(val & 0x33333333) << 2] + // -> [(val & 0xCCCCCCCC) >> 2] & [(val << 2) & 0xCCCCCCCC] + MachineInstrBuilder Swap2 = + SwapN(2, Ty, MIRBuilder, Swap4, APInt::getSplat(Size, APInt(8, 0xCC))); + + // swap high and low 1 bit in 2 bit blocks 1|0 3|2 5|4 7|6 -> 0|1 2|3 4|5 6|7 + // [(val & 0xAAAAAAAA) >> 1] & [(val & 0x55555555) << 1] + // -> [(val & 0xAAAAAAAA) >> 1] & [(val << 1) & 0xAAAAAAAA] + SwapN(1, Dst, MIRBuilder, Swap2, APInt::getSplat(Size, APInt(8, 0xAA))); + + MI.eraseFromParent(); + return Legalized; +} + +LegalizerHelper::LegalizeResult +LegalizerHelper::lowerReadWriteRegister(MachineInstr &MI) { + MachineFunction &MF = MIRBuilder.getMF(); + + bool IsRead = MI.getOpcode() == TargetOpcode::G_READ_REGISTER; + int NameOpIdx = IsRead ? 1 : 0; + int ValRegIndex = IsRead ? 
0 : 1; + + Register ValReg = MI.getOperand(ValRegIndex).getReg(); + const LLT Ty = MRI.getType(ValReg); + const MDString *RegStr = cast<MDString>( + cast<MDNode>(MI.getOperand(NameOpIdx).getMetadata())->getOperand(0)); + Register PhysReg = TLI.getRegisterByName(RegStr->getString().data(), Ty, MF); - if (!PhysReg.isValid()) - return UnableToLegalize; - - if (IsRead) - MIRBuilder.buildCopy(ValReg, PhysReg); - else - MIRBuilder.buildCopy(PhysReg, ValReg); - - MI.eraseFromParent(); - return Legalized; -} + if (!PhysReg.isValid()) + return UnableToLegalize; + + if (IsRead) + MIRBuilder.buildCopy(ValReg, PhysReg); + else + MIRBuilder.buildCopy(PhysReg, ValReg); + + MI.eraseFromParent(); + return Legalized; +} LegalizerHelper::LegalizeResult LegalizerHelper::lowerSMULH_UMULH(MachineInstr &MI) { diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerInfo.cpp index 90af6c35d9..30acac14bc 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -1,750 +1,750 @@ -//===- lib/CodeGen/GlobalISel/LegalizerInfo.cpp - Legalizer ---------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Implement an interface to specify and query how an illegal operation on a -// given type should be expanded. -// -// Issues to be resolved: -// + Make it fast. -// + Support weird types like i3, <7 x i3>, ... -// + Operations with more than one type (ICMP, CMPXCHG, intrinsics, ...) 
-// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" -#include "llvm/ADT/SmallBitVector.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetOpcodes.h" -#include "llvm/MC/MCInstrDesc.h" -#include "llvm/MC/MCInstrInfo.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/LowLevelTypeImpl.h" -#include "llvm/Support/MathExtras.h" -#include <algorithm> -#include <map> - -using namespace llvm; -using namespace LegalizeActions; - -#define DEBUG_TYPE "legalizer-info" - -cl::opt<bool> llvm::DisableGISelLegalityCheck( - "disable-gisel-legality-check", - cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"), - cl::Hidden); - -raw_ostream &llvm::operator<<(raw_ostream &OS, LegalizeAction Action) { - switch (Action) { - case Legal: - OS << "Legal"; - break; - case NarrowScalar: - OS << "NarrowScalar"; - break; - case WidenScalar: - OS << "WidenScalar"; - break; - case FewerElements: - OS << "FewerElements"; - break; - case MoreElements: - OS << "MoreElements"; - break; - case Bitcast: - OS << "Bitcast"; - break; - case Lower: - OS << "Lower"; - break; - case Libcall: - OS << "Libcall"; - break; - case Custom: - OS << "Custom"; - break; - case Unsupported: - OS << "Unsupported"; - break; - case NotFound: - OS << "NotFound"; - break; - case UseLegacyRules: - OS << "UseLegacyRules"; - break; - } - return OS; -} - -raw_ostream &LegalityQuery::print(raw_ostream &OS) const { - OS << Opcode << ", Tys={"; - for (const auto &Type : Types) { - OS << Type << ", "; - } - OS << "}, Opcode="; - - OS << Opcode << ", MMOs={"; - for (const auto &MMODescr : MMODescrs) { - OS << MMODescr.SizeInBits << ", "; - } - OS << "}"; - - return OS; -} - -#ifndef NDEBUG -// Make sure the rule won't (trivially) loop forever. -static bool hasNoSimpleLoops(const LegalizeRule &Rule, const LegalityQuery &Q, - const std::pair<unsigned, LLT> &Mutation) { - switch (Rule.getAction()) { +//===- lib/CodeGen/GlobalISel/LegalizerInfo.cpp - Legalizer ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Implement an interface to specify and query how an illegal operation on a +// given type should be expanded. +// +// Issues to be resolved: +// + Make it fast. +// + Support weird types like i3, <7 x i3>, ... +// + Operations with more than one type (ICMP, CMPXCHG, intrinsics, ...) 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" +#include "llvm/ADT/SmallBitVector.h" +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LowLevelTypeImpl.h" +#include "llvm/Support/MathExtras.h" +#include <algorithm> +#include <map> + +using namespace llvm; +using namespace LegalizeActions; + +#define DEBUG_TYPE "legalizer-info" + +cl::opt<bool> llvm::DisableGISelLegalityCheck( + "disable-gisel-legality-check", + cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"), + cl::Hidden); + +raw_ostream &llvm::operator<<(raw_ostream &OS, LegalizeAction Action) { + switch (Action) { case Legal: - case Custom: - case Lower: - case MoreElements: - case FewerElements: - break; - default: - return Q.Types[Mutation.first] != Mutation.second; - } - return true; -} - -// Make sure the returned mutation makes sense for the match type. -static bool mutationIsSane(const LegalizeRule &Rule, - const LegalityQuery &Q, - std::pair<unsigned, LLT> Mutation) { - // If the user wants a custom mutation, then we can't really say much about - // it. Return true, and trust that they're doing the right thing. + OS << "Legal"; + break; + case NarrowScalar: + OS << "NarrowScalar"; + break; + case WidenScalar: + OS << "WidenScalar"; + break; + case FewerElements: + OS << "FewerElements"; + break; + case MoreElements: + OS << "MoreElements"; + break; + case Bitcast: + OS << "Bitcast"; + break; + case Lower: + OS << "Lower"; + break; + case Libcall: + OS << "Libcall"; + break; + case Custom: + OS << "Custom"; + break; + case Unsupported: + OS << "Unsupported"; + break; + case NotFound: + OS << "NotFound"; + break; + case UseLegacyRules: + OS << "UseLegacyRules"; + break; + } + return OS; +} + +raw_ostream &LegalityQuery::print(raw_ostream &OS) const { + OS << Opcode << ", Tys={"; + for (const auto &Type : Types) { + OS << Type << ", "; + } + OS << "}, Opcode="; + + OS << Opcode << ", MMOs={"; + for (const auto &MMODescr : MMODescrs) { + OS << MMODescr.SizeInBits << ", "; + } + OS << "}"; + + return OS; +} + +#ifndef NDEBUG +// Make sure the rule won't (trivially) loop forever. +static bool hasNoSimpleLoops(const LegalizeRule &Rule, const LegalityQuery &Q, + const std::pair<unsigned, LLT> &Mutation) { + switch (Rule.getAction()) { + case Legal: + case Custom: + case Lower: + case MoreElements: + case FewerElements: + break; + default: + return Q.Types[Mutation.first] != Mutation.second; + } + return true; +} + +// Make sure the returned mutation makes sense for the match type. +static bool mutationIsSane(const LegalizeRule &Rule, + const LegalityQuery &Q, + std::pair<unsigned, LLT> Mutation) { + // If the user wants a custom mutation, then we can't really say much about + // it. Return true, and trust that they're doing the right thing. 
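  // For the checkable actions, the rules below require e.g.:
  //   NarrowScalar  s64 -> s32              OK (scalar size decreases)
  //   WidenScalar   s16 -> s32              OK (scalar size increases)
  //   FewerElements <4 x s32> -> <2 x s32>  OK (fewer elements, same
  //                                             element type)
  //   Bitcast       <2 x s32> -> s64        OK (same total size, new type)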
if (Rule.getAction() == Custom || Rule.getAction() == Legal) - return true; - - const unsigned TypeIdx = Mutation.first; - const LLT OldTy = Q.Types[TypeIdx]; - const LLT NewTy = Mutation.second; - - switch (Rule.getAction()) { - case FewerElements: - if (!OldTy.isVector()) - return false; - LLVM_FALLTHROUGH; - case MoreElements: { - // MoreElements can go from scalar to vector. - const unsigned OldElts = OldTy.isVector() ? OldTy.getNumElements() : 1; - if (NewTy.isVector()) { - if (Rule.getAction() == FewerElements) { - // Make sure the element count really decreased. - if (NewTy.getNumElements() >= OldElts) - return false; - } else { - // Make sure the element count really increased. - if (NewTy.getNumElements() <= OldElts) - return false; - } + return true; + + const unsigned TypeIdx = Mutation.first; + const LLT OldTy = Q.Types[TypeIdx]; + const LLT NewTy = Mutation.second; + + switch (Rule.getAction()) { + case FewerElements: + if (!OldTy.isVector()) + return false; + LLVM_FALLTHROUGH; + case MoreElements: { + // MoreElements can go from scalar to vector. + const unsigned OldElts = OldTy.isVector() ? OldTy.getNumElements() : 1; + if (NewTy.isVector()) { + if (Rule.getAction() == FewerElements) { + // Make sure the element count really decreased. + if (NewTy.getNumElements() >= OldElts) + return false; + } else { + // Make sure the element count really increased. + if (NewTy.getNumElements() <= OldElts) + return false; + } } else if (Rule.getAction() == MoreElements) return false; - - // Make sure the element type didn't change. - return NewTy.getScalarType() == OldTy.getScalarType(); - } - case NarrowScalar: - case WidenScalar: { - if (OldTy.isVector()) { - // Number of elements should not change. - if (!NewTy.isVector() || OldTy.getNumElements() != NewTy.getNumElements()) - return false; - } else { - // Both types must be vectors - if (NewTy.isVector()) - return false; - } - - if (Rule.getAction() == NarrowScalar) { - // Make sure the size really decreased. - if (NewTy.getScalarSizeInBits() >= OldTy.getScalarSizeInBits()) - return false; - } else { - // Make sure the size really increased. - if (NewTy.getScalarSizeInBits() <= OldTy.getScalarSizeInBits()) - return false; - } - - return true; - } - case Bitcast: { - return OldTy != NewTy && OldTy.getSizeInBits() == NewTy.getSizeInBits(); - } - default: - return true; - } -} -#endif - -LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const { - LLVM_DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs()); - dbgs() << "\n"); - if (Rules.empty()) { - LLVM_DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n"); - return {LegalizeAction::UseLegacyRules, 0, LLT{}}; - } - for (const LegalizeRule &Rule : Rules) { - if (Rule.match(Query)) { - LLVM_DEBUG(dbgs() << ".. match\n"); - std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query); - LLVM_DEBUG(dbgs() << ".. .. " << Rule.getAction() << ", " - << Mutation.first << ", " << Mutation.second << "\n"); - assert(mutationIsSane(Rule, Query, Mutation) && - "legality mutation invalid for match"); - assert(hasNoSimpleLoops(Rule, Query, Mutation) && "Simple loop detected"); - return {Rule.getAction(), Mutation.first, Mutation.second}; - } else - LLVM_DEBUG(dbgs() << ".. no match\n"); - } - LLVM_DEBUG(dbgs() << ".. unsupported\n"); - return {LegalizeAction::Unsupported, 0, LLT{}}; -} - -bool LegalizeRuleSet::verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const { -#ifndef NDEBUG - if (Rules.empty()) { - LLVM_DEBUG( - dbgs() << ".. 
type index coverage check SKIPPED: no rules defined\n"); - return true; - } - const int64_t FirstUncovered = TypeIdxsCovered.find_first_unset(); - if (FirstUncovered < 0) { - LLVM_DEBUG(dbgs() << ".. type index coverage check SKIPPED:" - " user-defined predicate detected\n"); - return true; - } - const bool AllCovered = (FirstUncovered >= NumTypeIdxs); - if (NumTypeIdxs > 0) - LLVM_DEBUG(dbgs() << ".. the first uncovered type index: " << FirstUncovered - << ", " << (AllCovered ? "OK" : "FAIL") << "\n"); - return AllCovered; -#else - return true; -#endif -} - -bool LegalizeRuleSet::verifyImmIdxsCoverage(unsigned NumImmIdxs) const { -#ifndef NDEBUG - if (Rules.empty()) { - LLVM_DEBUG( - dbgs() << ".. imm index coverage check SKIPPED: no rules defined\n"); - return true; - } - const int64_t FirstUncovered = ImmIdxsCovered.find_first_unset(); - if (FirstUncovered < 0) { - LLVM_DEBUG(dbgs() << ".. imm index coverage check SKIPPED:" - " user-defined predicate detected\n"); - return true; - } - const bool AllCovered = (FirstUncovered >= NumImmIdxs); - LLVM_DEBUG(dbgs() << ".. the first uncovered imm index: " << FirstUncovered - << ", " << (AllCovered ? "OK" : "FAIL") << "\n"); - return AllCovered; -#else - return true; -#endif -} - -LegalizerInfo::LegalizerInfo() : TablesInitialized(false) { - // Set defaults. - // FIXME: these two (G_ANYEXT and G_TRUNC?) can be legalized to the - // fundamental load/store Jakob proposed. Once loads & stores are supported. - setScalarAction(TargetOpcode::G_ANYEXT, 1, {{1, Legal}}); - setScalarAction(TargetOpcode::G_ZEXT, 1, {{1, Legal}}); - setScalarAction(TargetOpcode::G_SEXT, 1, {{1, Legal}}); - setScalarAction(TargetOpcode::G_TRUNC, 0, {{1, Legal}}); - setScalarAction(TargetOpcode::G_TRUNC, 1, {{1, Legal}}); - - setScalarAction(TargetOpcode::G_INTRINSIC, 0, {{1, Legal}}); - setScalarAction(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS, 0, {{1, Legal}}); - - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_IMPLICIT_DEF, 0, narrowToSmallerAndUnsupportedIfTooSmall); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_ADD, 0, widenToLargerTypesAndNarrowToLargest); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_OR, 0, widenToLargerTypesAndNarrowToLargest); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_LOAD, 0, narrowToSmallerAndUnsupportedIfTooSmall); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_STORE, 0, narrowToSmallerAndUnsupportedIfTooSmall); - - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_BRCOND, 0, widenToLargerTypesUnsupportedOtherwise); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_INSERT, 0, narrowToSmallerAndUnsupportedIfTooSmall); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_EXTRACT, 0, narrowToSmallerAndUnsupportedIfTooSmall); - setLegalizeScalarToDifferentSizeStrategy( - TargetOpcode::G_EXTRACT, 1, narrowToSmallerAndUnsupportedIfTooSmall); - setScalarAction(TargetOpcode::G_FNEG, 0, {{1, Lower}}); -} - -void LegalizerInfo::computeTables() { - assert(TablesInitialized == false); - - for (unsigned OpcodeIdx = 0; OpcodeIdx <= LastOp - FirstOp; ++OpcodeIdx) { - const unsigned Opcode = FirstOp + OpcodeIdx; - for (unsigned TypeIdx = 0; TypeIdx != SpecifiedActions[OpcodeIdx].size(); - ++TypeIdx) { - // 0. Collect information specified through the setAction API, i.e. - // for specific bit sizes. 
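The step-0 bucketing that follows is easier to see in isolation. A minimal standalone model of it, where MiniLLT and every other name is local to this sketch rather than the real llvm::LLT machinery:

// Standalone sketch of step 0: classify specified (type, action) pairs into
// scalar, per-address-space pointer, and per-element-size vector buckets.
#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

enum Action { Legal, Unsupported };
using SizeAndAction = std::pair<uint16_t, Action>;
using SizeAndActionsVec = std::vector<SizeAndAction>;

struct MiniLLT {
  bool IsPointer = false, IsVector = false;
  uint16_t SizeInBits = 0, ElemSizeInBits = 0, AddrSpace = 0;
};

int main() {
  // setAction-style specifications: s32, p1 (64-bit), <4 x s32>.
  std::vector<std::pair<MiniLLT, Action>> Specified = {
      {{false, false, 32, 0, 0}, Legal},
      {{true, false, 64, 0, 1}, Legal},
      {{false, true, 128, 32, 0}, Legal},
  };
  SizeAndActionsVec Scalar;
  std::map<uint16_t, SizeAndActionsVec> PerAddrSpace; // pointers
  std::map<uint16_t, SizeAndActionsVec> PerElemSize;  // vectors
  for (auto &[Ty, A] : Specified) {
    if (Ty.IsPointer)
      PerAddrSpace[Ty.AddrSpace].push_back({Ty.SizeInBits, A});
    else if (Ty.IsVector)
      PerElemSize[Ty.ElemSizeInBits].push_back({Ty.SizeInBits, A});
    else
      Scalar.push_back({Ty.SizeInBits, A});
  }
  // One entry per bucket, mirroring steps 1-3 that follow in computeTables.
  assert(Scalar.size() == 1 && PerAddrSpace.count(1) && PerElemSize.count(32));
}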
- // For scalar types: - SizeAndActionsVec ScalarSpecifiedActions; - // For pointer types: - std::map<uint16_t, SizeAndActionsVec> AddressSpace2SpecifiedActions; - // For vector types: - std::map<uint16_t, SizeAndActionsVec> ElemSize2SpecifiedActions; - for (auto LLT2Action : SpecifiedActions[OpcodeIdx][TypeIdx]) { - const LLT Type = LLT2Action.first; - const LegalizeAction Action = LLT2Action.second; - - auto SizeAction = std::make_pair(Type.getSizeInBits(), Action); - if (Type.isPointer()) - AddressSpace2SpecifiedActions[Type.getAddressSpace()].push_back( - SizeAction); - else if (Type.isVector()) - ElemSize2SpecifiedActions[Type.getElementType().getSizeInBits()] - .push_back(SizeAction); - else - ScalarSpecifiedActions.push_back(SizeAction); - } - - // 1. Handle scalar types - { - // Decide how to handle bit sizes for which no explicit specification - // was given. - SizeChangeStrategy S = &unsupportedForDifferentSizes; - if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() && - ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr) - S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx]; - llvm::sort(ScalarSpecifiedActions); - checkPartialSizeAndActionsVector(ScalarSpecifiedActions); - setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions)); - } - - // 2. Handle pointer types - for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) { - llvm::sort(PointerSpecifiedActions.second); - checkPartialSizeAndActionsVector(PointerSpecifiedActions.second); - // For pointer types, we assume that there isn't a meaningfull way - // to change the number of bits used in the pointer. - setPointerAction( - Opcode, TypeIdx, PointerSpecifiedActions.first, - unsupportedForDifferentSizes(PointerSpecifiedActions.second)); - } - - // 3. Handle vector types - SizeAndActionsVec ElementSizesSeen; - for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) { - llvm::sort(VectorSpecifiedActions.second); - const uint16_t ElementSize = VectorSpecifiedActions.first; - ElementSizesSeen.push_back({ElementSize, Legal}); - checkPartialSizeAndActionsVector(VectorSpecifiedActions.second); - // For vector types, we assume that the best way to adapt the number - // of elements is to the next larger number of elements type for which - // the vector type is legal, unless there is no such type. In that case, - // legalize towards a vector type with a smaller number of elements. - SizeAndActionsVec NumElementsActions; - for (SizeAndAction BitsizeAndAction : VectorSpecifiedActions.second) { - assert(BitsizeAndAction.first % ElementSize == 0); - const uint16_t NumElements = BitsizeAndAction.first / ElementSize; - NumElementsActions.push_back({NumElements, BitsizeAndAction.second}); - } - setVectorNumElementAction( - Opcode, TypeIdx, ElementSize, - moreToWiderTypesAndLessToWidest(NumElementsActions)); - } - llvm::sort(ElementSizesSeen); - SizeChangeStrategy VectorElementSizeChangeStrategy = - &unsupportedForDifferentSizes; - if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() && - VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr) - VectorElementSizeChangeStrategy = - VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx]; - setScalarInVectorAction( - Opcode, TypeIdx, VectorElementSizeChangeStrategy(ElementSizesSeen)); - } - } - - TablesInitialized = true; -} - -// FIXME: inefficient implementation for now. 
Without ComputeValueVTs we're -// probably going to need specialized lookup structures for various types before -// we have any hope of doing well with something like <13 x i3>. Even the common -// cases should do better than what we have now. -std::pair<LegalizeAction, LLT> -LegalizerInfo::getAspectAction(const InstrAspect &Aspect) const { - assert(TablesInitialized && "backend forgot to call computeTables"); - // These *have* to be implemented for now, they're the fundamental basis of - // how everything else is transformed. - if (Aspect.Type.isScalar() || Aspect.Type.isPointer()) - return findScalarLegalAction(Aspect); - assert(Aspect.Type.isVector()); - return findVectorLegalAction(Aspect); -} - -/// Helper function to get LLT for the given type index. -static LLT getTypeFromTypeIdx(const MachineInstr &MI, - const MachineRegisterInfo &MRI, unsigned OpIdx, - unsigned TypeIdx) { - assert(TypeIdx < MI.getNumOperands() && "Unexpected TypeIdx"); - // G_UNMERGE_VALUES has variable number of operands, but there is only - // one source type and one destination type as all destinations must be the - // same type. So, get the last operand if TypeIdx == 1. - if (MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && TypeIdx == 1) - return MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg()); - return MRI.getType(MI.getOperand(OpIdx).getReg()); -} - -unsigned LegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const { - assert(Opcode >= FirstOp && Opcode <= LastOp && "Unsupported opcode"); - return Opcode - FirstOp; -} - -unsigned LegalizerInfo::getActionDefinitionsIdx(unsigned Opcode) const { - unsigned OpcodeIdx = getOpcodeIdxForOpcode(Opcode); - if (unsigned Alias = RulesForOpcode[OpcodeIdx].getAlias()) { - LLVM_DEBUG(dbgs() << ".. opcode " << Opcode << " is aliased to " << Alias - << "\n"); - OpcodeIdx = getOpcodeIdxForOpcode(Alias); - assert(RulesForOpcode[OpcodeIdx].getAlias() == 0 && "Cannot chain aliases"); - } - - return OpcodeIdx; -} - -const LegalizeRuleSet & -LegalizerInfo::getActionDefinitions(unsigned Opcode) const { - unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); - return RulesForOpcode[OpcodeIdx]; -} - -LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(unsigned Opcode) { - unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); - auto &Result = RulesForOpcode[OpcodeIdx]; - assert(!Result.isAliasedByAnother() && "Modifying this opcode will modify aliases"); - return Result; -} - -LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder( - std::initializer_list<unsigned> Opcodes) { - unsigned Representative = *Opcodes.begin(); - - assert(!llvm::empty(Opcodes) && Opcodes.begin() + 1 != Opcodes.end() && - "Initializer list must have at least two opcodes"); - - for (auto I = Opcodes.begin() + 1, E = Opcodes.end(); I != E; ++I) - aliasActionDefinitions(Representative, *I); - - auto &Return = getActionDefinitionsBuilder(Representative); - Return.setIsAliasedByAnother(); - return Return; -} - -void LegalizerInfo::aliasActionDefinitions(unsigned OpcodeTo, - unsigned OpcodeFrom) { - assert(OpcodeTo != OpcodeFrom && "Cannot alias to self"); - assert(OpcodeTo >= FirstOp && OpcodeTo <= LastOp && "Unsupported opcode"); - const unsigned OpcodeFromIdx = getOpcodeIdxForOpcode(OpcodeFrom); - RulesForOpcode[OpcodeFromIdx].aliasTo(OpcodeTo); -} - -LegalizeActionStep -LegalizerInfo::getAction(const LegalityQuery &Query) const { - LegalizeActionStep Step = getActionDefinitions(Query.Opcode).apply(Query); - if (Step.Action != LegalizeAction::UseLegacyRules) { - return Step; - } 
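When the rule-based path above does not claim the query, the legacy tables take over below; either way callers see one interface. A sketch of a typical query, assuming some target's LegalizerInfo instance LI (the instance name and the chosen type are illustrative; the calls themselves are the ones defined in this file):

#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
using namespace llvm;

// Ask how G_ADD on s64 should be handled; LI is assumed to come from some
// target's getSubtarget().getLegalizerInfo().
LegalizeActionStep queryAddS64(const LegalizerInfo &LI) {
  const LLT S64 = LLT::scalar(64);
  // G_ADD has a single type index (0) and no memory operands.
  LegalizeActionStep Step = LI.getAction({TargetOpcode::G_ADD, {S64}});
  // Step.Action says what to do (Legal, NarrowScalar, ...); for a mutation,
  // Step.TypeIdx and Step.NewType say which type to rewrite, and to what.
  return Step;
}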
- - for (unsigned i = 0; i < Query.Types.size(); ++i) { - auto Action = getAspectAction({Query.Opcode, i, Query.Types[i]}); - if (Action.first != Legal) { - LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Action=" - << Action.first << ", " << Action.second << "\n"); - return {Action.first, i, Action.second}; - } else - LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n"); - } - LLVM_DEBUG(dbgs() << ".. (legacy) Legal\n"); - return {Legal, 0, LLT{}}; -} - -LegalizeActionStep -LegalizerInfo::getAction(const MachineInstr &MI, - const MachineRegisterInfo &MRI) const { - SmallVector<LLT, 2> Types; - SmallBitVector SeenTypes(8); - const MCOperandInfo *OpInfo = MI.getDesc().OpInfo; - // FIXME: probably we'll need to cache the results here somehow? - for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) { - if (!OpInfo[i].isGenericType()) - continue; - - // We must only record actions once for each TypeIdx; otherwise we'd - // try to legalize operands multiple times down the line. - unsigned TypeIdx = OpInfo[i].getGenericTypeIndex(); - if (SeenTypes[TypeIdx]) - continue; - - SeenTypes.set(TypeIdx); - - LLT Ty = getTypeFromTypeIdx(MI, MRI, i, TypeIdx); - Types.push_back(Ty); - } - - SmallVector<LegalityQuery::MemDesc, 2> MemDescrs; - for (const auto &MMO : MI.memoperands()) - MemDescrs.push_back({8 * MMO->getSize() /* in bits */, - 8 * MMO->getAlign().value(), MMO->getOrdering()}); - - return getAction({MI.getOpcode(), Types, MemDescrs}); -} - -bool LegalizerInfo::isLegal(const MachineInstr &MI, - const MachineRegisterInfo &MRI) const { - return getAction(MI, MRI).Action == Legal; -} - -bool LegalizerInfo::isLegalOrCustom(const MachineInstr &MI, - const MachineRegisterInfo &MRI) const { - auto Action = getAction(MI, MRI).Action; - // If the action is custom, it may not necessarily modify the instruction, - // so we have to assume it's legal. - return Action == Legal || Action == Custom; -} - -LegalizerInfo::SizeAndActionsVec -LegalizerInfo::increaseToLargerTypesAndDecreaseToLargest( - const SizeAndActionsVec &v, LegalizeAction IncreaseAction, - LegalizeAction DecreaseAction) { - SizeAndActionsVec result; - unsigned LargestSizeSoFar = 0; - if (v.size() >= 1 && v[0].first != 1) - result.push_back({1, IncreaseAction}); - for (size_t i = 0; i < v.size(); ++i) { - result.push_back(v[i]); - LargestSizeSoFar = v[i].first; - if (i + 1 < v.size() && v[i + 1].first != v[i].first + 1) { - result.push_back({LargestSizeSoFar + 1, IncreaseAction}); - LargestSizeSoFar = v[i].first + 1; - } - } - result.push_back({LargestSizeSoFar + 1, DecreaseAction}); - return result; -} - -LegalizerInfo::SizeAndActionsVec -LegalizerInfo::decreaseToSmallerTypesAndIncreaseToSmallest( - const SizeAndActionsVec &v, LegalizeAction DecreaseAction, - LegalizeAction IncreaseAction) { - SizeAndActionsVec result; - if (v.size() == 0 || v[0].first != 1) - result.push_back({1, IncreaseAction}); - for (size_t i = 0; i < v.size(); ++i) { - result.push_back(v[i]); - if (i + 1 == v.size() || v[i + 1].first != v[i].first + 1) { - result.push_back({v[i].first + 1, DecreaseAction}); - } - } - return result; -} - -LegalizerInfo::SizeAndAction -LegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Size) { - assert(Size >= 1); - // Find the last element in Vec that has a bitsize equal to or smaller than - // the requested bit size. - // That is the element just before the first element that is bigger than Size. 
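A standalone model of the completion helpers just defined and of the lookup performed next, using plain std types as stand-ins for SizeAndActionsVec; the completed table below is what increaseToLargerTypesAndDecreaseToLargest would produce for a target that only marked s32 and s64 legal:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

enum Action { Legal, WidenScalar, NarrowScalar };
using SizeAndAction = std::pair<uint32_t, Action>;
using Vec = std::vector<SizeAndAction>;

int main() {
  // Completed table for a target that specified {{32, Legal}, {64, Legal}}:
  // everything below a legal size widens; above the largest, it narrows.
  Vec Completed = {{1, WidenScalar},
                   {32, Legal},
                   {33, WidenScalar},
                   {64, Legal},
                   {65, NarrowScalar}};
  // findAction-style lookup for Size == 40: take the last entry whose size
  // is <= 40, i.e. the one just before the first bigger entry.
  const uint32_t Size = 40;
  auto It = std::partition_point(
      Completed.begin(), Completed.end(),
      [=](const SizeAndAction &A) { return A.first <= Size; });
  assert(It != Completed.begin() && "table must start at size 1");
  assert((It - 1)->second == WidenScalar); // s40 widens (towards s64)
}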
- auto It = partition_point( - Vec, [=](const SizeAndAction &A) { return A.first <= Size; }); - assert(It != Vec.begin() && "Does Vec not start with size 1?"); - int VecIdx = It - Vec.begin() - 1; - - LegalizeAction Action = Vec[VecIdx].second; - switch (Action) { - case Legal: - case Bitcast: - case Lower: - case Libcall: - case Custom: - return {Size, Action}; - case FewerElements: - // FIXME: is this special case still needed and correct? - // Special case for scalarization: - if (Vec == SizeAndActionsVec({{1, FewerElements}})) - return {1, FewerElements}; - LLVM_FALLTHROUGH; - case NarrowScalar: { - // The following needs to be a loop, as for now, we do allow needing to - // go over "Unsupported" bit sizes before finding a legalizable bit size. - // e.g. (s8, WidenScalar), (s9, Unsupported), (s32, Legal). if Size==8, - // we need to iterate over s9, and then to s32 to return (s32, Legal). - // If we want to get rid of the below loop, we should have stronger asserts - // when building the SizeAndActionsVecs, probably not allowing - // "Unsupported" unless at the ends of the vector. - for (int i = VecIdx - 1; i >= 0; --i) - if (!needsLegalizingToDifferentSize(Vec[i].second) && - Vec[i].second != Unsupported) - return {Vec[i].first, Action}; - llvm_unreachable(""); - } - case WidenScalar: - case MoreElements: { - // See above, the following needs to be a loop, at least for now. - for (std::size_t i = VecIdx + 1; i < Vec.size(); ++i) - if (!needsLegalizingToDifferentSize(Vec[i].second) && - Vec[i].second != Unsupported) - return {Vec[i].first, Action}; - llvm_unreachable(""); - } - case Unsupported: - return {Size, Unsupported}; - case NotFound: - case UseLegacyRules: - llvm_unreachable("NotFound"); - } - llvm_unreachable("Action has an unknown enum value"); -} - -std::pair<LegalizeAction, LLT> -LegalizerInfo::findScalarLegalAction(const InstrAspect &Aspect) const { - assert(Aspect.Type.isScalar() || Aspect.Type.isPointer()); - if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) - return {NotFound, LLT()}; - const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); - if (Aspect.Type.isPointer() && - AddrSpace2PointerActions[OpcodeIdx].find(Aspect.Type.getAddressSpace()) == - AddrSpace2PointerActions[OpcodeIdx].end()) { - return {NotFound, LLT()}; - } - const SmallVector<SizeAndActionsVec, 1> &Actions = - Aspect.Type.isPointer() - ? AddrSpace2PointerActions[OpcodeIdx] - .find(Aspect.Type.getAddressSpace()) - ->second - : ScalarActions[OpcodeIdx]; - if (Aspect.Idx >= Actions.size()) - return {NotFound, LLT()}; - const SizeAndActionsVec &Vec = Actions[Aspect.Idx]; - // FIXME: speed up this search, e.g. by using a results cache for repeated - // queries? - auto SizeAndAction = findAction(Vec, Aspect.Type.getSizeInBits()); - return {SizeAndAction.second, - Aspect.Type.isScalar() ? LLT::scalar(SizeAndAction.first) - : LLT::pointer(Aspect.Type.getAddressSpace(), - SizeAndAction.first)}; -} - -std::pair<LegalizeAction, LLT> -LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const { - assert(Aspect.Type.isVector()); - // First legalize the vector element size, then legalize the number of - // lanes in the vector. 
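A toy walk-through of those two phases with hard-coded table answers; in the real function that follows, both answers come from findAction lookups:

#include <cassert>
#include <cstdint>

enum Action { Legal, WidenScalar, MoreElements };
struct Step { Action A; uint16_t NewVal; };

// Pretend table answers for a query of <3 x s16>:
Step elementSizeStep(uint16_t Bits) { return {Legal, Bits}; } // s16 is fine
Step laneCountStep(uint16_t Lanes) {
  return {MoreElements, uint16_t(Lanes + 1)}; // pad 3 lanes up to 4
}

int main() {
  uint16_t ElemBits = 16, NumElems = 3;
  Step S1 = elementSizeStep(ElemBits);
  if (S1.A != Legal)
    // The real function stops here, returning the action plus the
    // intermediate <3 x sNew> type; lanes are only adjusted once the
    // element size is settled.
    return 0;
  Step S2 = laneCountStep(NumElems);
  assert(S2.A == MoreElements && S2.NewVal == 4); // final advice: <4 x s16>
  return 0;
}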
- if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) - return {NotFound, Aspect.Type}; - const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); - const unsigned TypeIdx = Aspect.Idx; - if (TypeIdx >= ScalarInVectorActions[OpcodeIdx].size()) - return {NotFound, Aspect.Type}; - const SizeAndActionsVec &ElemSizeVec = - ScalarInVectorActions[OpcodeIdx][TypeIdx]; - - LLT IntermediateType; - auto ElementSizeAndAction = - findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits()); - IntermediateType = - LLT::vector(Aspect.Type.getNumElements(), ElementSizeAndAction.first); - if (ElementSizeAndAction.second != Legal) - return {ElementSizeAndAction.second, IntermediateType}; - - auto i = NumElements2Actions[OpcodeIdx].find( - IntermediateType.getScalarSizeInBits()); - if (i == NumElements2Actions[OpcodeIdx].end()) { - return {NotFound, IntermediateType}; - } - const SizeAndActionsVec &NumElementsVec = (*i).second[TypeIdx]; - auto NumElementsAndAction = - findAction(NumElementsVec, IntermediateType.getNumElements()); - return {NumElementsAndAction.second, - LLT::vector(NumElementsAndAction.first, - IntermediateType.getScalarSizeInBits())}; -} - -unsigned LegalizerInfo::getExtOpcodeForWideningConstant(LLT SmallTy) const { - return SmallTy.isByteSized() ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; -} - -/// \pre Type indices of every opcode form a dense set starting from 0. -void LegalizerInfo::verify(const MCInstrInfo &MII) const { -#ifndef NDEBUG - std::vector<unsigned> FailedOpcodes; - for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) { - const MCInstrDesc &MCID = MII.get(Opcode); - const unsigned NumTypeIdxs = std::accumulate( - MCID.opInfo_begin(), MCID.opInfo_end(), 0U, - [](unsigned Acc, const MCOperandInfo &OpInfo) { - return OpInfo.isGenericType() - ? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc) - : Acc; - }); - const unsigned NumImmIdxs = std::accumulate( - MCID.opInfo_begin(), MCID.opInfo_end(), 0U, - [](unsigned Acc, const MCOperandInfo &OpInfo) { - return OpInfo.isGenericImm() - ? std::max(OpInfo.getGenericImmIndex() + 1U, Acc) - : Acc; - }); - LLVM_DEBUG(dbgs() << MII.getName(Opcode) << " (opcode " << Opcode - << "): " << NumTypeIdxs << " type ind" - << (NumTypeIdxs == 1 ? "ex" : "ices") << ", " - << NumImmIdxs << " imm ind" - << (NumImmIdxs == 1 ? "ex" : "ices") << "\n"); - const LegalizeRuleSet &RuleSet = getActionDefinitions(Opcode); - if (!RuleSet.verifyTypeIdxsCoverage(NumTypeIdxs)) - FailedOpcodes.push_back(Opcode); - else if (!RuleSet.verifyImmIdxsCoverage(NumImmIdxs)) - FailedOpcodes.push_back(Opcode); - } - if (!FailedOpcodes.empty()) { - errs() << "The following opcodes have ill-defined legalization rules:"; - for (unsigned Opcode : FailedOpcodes) - errs() << " " << MII.getName(Opcode); - errs() << "\n"; - - report_fatal_error("ill-defined LegalizerInfo" - ", try -debug-only=legalizer-info for details"); - } -#endif -} - -#ifndef NDEBUG -// FIXME: This should be in the MachineVerifier, but it can't use the -// LegalizerInfo as it's currently in the separate GlobalISel library. -// Note that RegBankSelected property already checked in the verifier -// has the same layering problem, but we only use inline methods so -// end up not needing to link against the GlobalISel library. 
-const MachineInstr *llvm::machineFunctionIsIllegal(const MachineFunction &MF) { - if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo()) { - const MachineRegisterInfo &MRI = MF.getRegInfo(); - for (const MachineBasicBlock &MBB : MF) - for (const MachineInstr &MI : MBB) - if (isPreISelGenericOpcode(MI.getOpcode()) && - !MLI->isLegalOrCustom(MI, MRI)) - return &MI; - } - return nullptr; -} -#endif + + // Make sure the element type didn't change. + return NewTy.getScalarType() == OldTy.getScalarType(); + } + case NarrowScalar: + case WidenScalar: { + if (OldTy.isVector()) { + // Number of elements should not change. + if (!NewTy.isVector() || OldTy.getNumElements() != NewTy.getNumElements()) + return false; + } else { + // Both types must be vectors + if (NewTy.isVector()) + return false; + } + + if (Rule.getAction() == NarrowScalar) { + // Make sure the size really decreased. + if (NewTy.getScalarSizeInBits() >= OldTy.getScalarSizeInBits()) + return false; + } else { + // Make sure the size really increased. + if (NewTy.getScalarSizeInBits() <= OldTy.getScalarSizeInBits()) + return false; + } + + return true; + } + case Bitcast: { + return OldTy != NewTy && OldTy.getSizeInBits() == NewTy.getSizeInBits(); + } + default: + return true; + } +} +#endif + +LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const { + LLVM_DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs()); + dbgs() << "\n"); + if (Rules.empty()) { + LLVM_DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n"); + return {LegalizeAction::UseLegacyRules, 0, LLT{}}; + } + for (const LegalizeRule &Rule : Rules) { + if (Rule.match(Query)) { + LLVM_DEBUG(dbgs() << ".. match\n"); + std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query); + LLVM_DEBUG(dbgs() << ".. .. " << Rule.getAction() << ", " + << Mutation.first << ", " << Mutation.second << "\n"); + assert(mutationIsSane(Rule, Query, Mutation) && + "legality mutation invalid for match"); + assert(hasNoSimpleLoops(Rule, Query, Mutation) && "Simple loop detected"); + return {Rule.getAction(), Mutation.first, Mutation.second}; + } else + LLVM_DEBUG(dbgs() << ".. no match\n"); + } + LLVM_DEBUG(dbgs() << ".. unsupported\n"); + return {LegalizeAction::Unsupported, 0, LLT{}}; +} + +bool LegalizeRuleSet::verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const { +#ifndef NDEBUG + if (Rules.empty()) { + LLVM_DEBUG( + dbgs() << ".. type index coverage check SKIPPED: no rules defined\n"); + return true; + } + const int64_t FirstUncovered = TypeIdxsCovered.find_first_unset(); + if (FirstUncovered < 0) { + LLVM_DEBUG(dbgs() << ".. type index coverage check SKIPPED:" + " user-defined predicate detected\n"); + return true; + } + const bool AllCovered = (FirstUncovered >= NumTypeIdxs); + if (NumTypeIdxs > 0) + LLVM_DEBUG(dbgs() << ".. the first uncovered type index: " << FirstUncovered + << ", " << (AllCovered ? "OK" : "FAIL") << "\n"); + return AllCovered; +#else + return true; +#endif +} + +bool LegalizeRuleSet::verifyImmIdxsCoverage(unsigned NumImmIdxs) const { +#ifndef NDEBUG + if (Rules.empty()) { + LLVM_DEBUG( + dbgs() << ".. imm index coverage check SKIPPED: no rules defined\n"); + return true; + } + const int64_t FirstUncovered = ImmIdxsCovered.find_first_unset(); + if (FirstUncovered < 0) { + LLVM_DEBUG(dbgs() << ".. imm index coverage check SKIPPED:" + " user-defined predicate detected\n"); + return true; + } + const bool AllCovered = (FirstUncovered >= NumImmIdxs); + LLVM_DEBUG(dbgs() << ".. 
the first uncovered imm index: " << FirstUncovered + << ", " << (AllCovered ? "OK" : "FAIL") << "\n"); + return AllCovered; +#else + return true; +#endif +} + +LegalizerInfo::LegalizerInfo() : TablesInitialized(false) { + // Set defaults. + // FIXME: these two (G_ANYEXT and G_TRUNC?) can be legalized to the + // fundamental load/store Jakob proposed. Once loads & stores are supported. + setScalarAction(TargetOpcode::G_ANYEXT, 1, {{1, Legal}}); + setScalarAction(TargetOpcode::G_ZEXT, 1, {{1, Legal}}); + setScalarAction(TargetOpcode::G_SEXT, 1, {{1, Legal}}); + setScalarAction(TargetOpcode::G_TRUNC, 0, {{1, Legal}}); + setScalarAction(TargetOpcode::G_TRUNC, 1, {{1, Legal}}); + + setScalarAction(TargetOpcode::G_INTRINSIC, 0, {{1, Legal}}); + setScalarAction(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS, 0, {{1, Legal}}); + + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_IMPLICIT_DEF, 0, narrowToSmallerAndUnsupportedIfTooSmall); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_ADD, 0, widenToLargerTypesAndNarrowToLargest); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_OR, 0, widenToLargerTypesAndNarrowToLargest); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_LOAD, 0, narrowToSmallerAndUnsupportedIfTooSmall); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_STORE, 0, narrowToSmallerAndUnsupportedIfTooSmall); + + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_BRCOND, 0, widenToLargerTypesUnsupportedOtherwise); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_INSERT, 0, narrowToSmallerAndUnsupportedIfTooSmall); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_EXTRACT, 0, narrowToSmallerAndUnsupportedIfTooSmall); + setLegalizeScalarToDifferentSizeStrategy( + TargetOpcode::G_EXTRACT, 1, narrowToSmallerAndUnsupportedIfTooSmall); + setScalarAction(TargetOpcode::G_FNEG, 0, {{1, Lower}}); +} + +void LegalizerInfo::computeTables() { + assert(TablesInitialized == false); + + for (unsigned OpcodeIdx = 0; OpcodeIdx <= LastOp - FirstOp; ++OpcodeIdx) { + const unsigned Opcode = FirstOp + OpcodeIdx; + for (unsigned TypeIdx = 0; TypeIdx != SpecifiedActions[OpcodeIdx].size(); + ++TypeIdx) { + // 0. Collect information specified through the setAction API, i.e. + // for specific bit sizes. + // For scalar types: + SizeAndActionsVec ScalarSpecifiedActions; + // For pointer types: + std::map<uint16_t, SizeAndActionsVec> AddressSpace2SpecifiedActions; + // For vector types: + std::map<uint16_t, SizeAndActionsVec> ElemSize2SpecifiedActions; + for (auto LLT2Action : SpecifiedActions[OpcodeIdx][TypeIdx]) { + const LLT Type = LLT2Action.first; + const LegalizeAction Action = LLT2Action.second; + + auto SizeAction = std::make_pair(Type.getSizeInBits(), Action); + if (Type.isPointer()) + AddressSpace2SpecifiedActions[Type.getAddressSpace()].push_back( + SizeAction); + else if (Type.isVector()) + ElemSize2SpecifiedActions[Type.getElementType().getSizeInBits()] + .push_back(SizeAction); + else + ScalarSpecifiedActions.push_back(SizeAction); + } + + // 1. Handle scalar types + { + // Decide how to handle bit sizes for which no explicit specification + // was given. 
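The default chosen immediately below, unsupportedForDifferentSizes, keeps only the explicitly specified sizes. A behavioural model of its effect on a partial specification (the helper here is a local stand-in, not the real one from LegalizerInfo.h):

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

enum Action { Legal, Unsupported };
using SizeAndAction = std::pair<uint16_t, Action>;
using Vec = std::vector<SizeAndAction>;

// Local stand-in: keep the listed sizes and bracket every gap (and the tail)
// with an Unsupported entry, which is the observable effect of the real
// strategy on a sorted specification.
Vec unsupportedForDifferentSizesModel(const Vec &V) {
  Vec R;
  uint16_t Prev = 0;
  for (auto [Size, A] : V) {
    if (Size != uint16_t(Prev + 1))
      R.push_back({uint16_t(Prev + 1), Unsupported});
    R.push_back({Size, A});
    Prev = Size;
  }
  R.push_back({uint16_t(Prev + 1), Unsupported});
  return R;
}

int main() {
  Vec R = unsupportedForDifferentSizesModel({{32, Legal}, {64, Legal}});
  // R == {1,Unsupported},{32,Legal},{33,Unsupported},{64,Legal},{65,Unsupported}:
  // only s32 and s64 survive; every other width is rejected outright.
  assert(R.size() == 5 && R[2].second == Unsupported);
}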
+        SizeChangeStrategy S = &unsupportedForDifferentSizes;
+        if (TypeIdx < ScalarSizeChangeStrategies[OpcodeIdx].size() &&
+            ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
+          S = ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx];
+        llvm::sort(ScalarSpecifiedActions);
+        checkPartialSizeAndActionsVector(ScalarSpecifiedActions);
+        setScalarAction(Opcode, TypeIdx, S(ScalarSpecifiedActions));
+      }
+
+      // 2. Handle pointer types
+      for (auto PointerSpecifiedActions : AddressSpace2SpecifiedActions) {
+        llvm::sort(PointerSpecifiedActions.second);
+        checkPartialSizeAndActionsVector(PointerSpecifiedActions.second);
+        // For pointer types, we assume that there isn't a meaningful way
+        // to change the number of bits used in the pointer.
+        setPointerAction(
+            Opcode, TypeIdx, PointerSpecifiedActions.first,
+            unsupportedForDifferentSizes(PointerSpecifiedActions.second));
+      }
+
+      // 3. Handle vector types
+      SizeAndActionsVec ElementSizesSeen;
+      for (auto VectorSpecifiedActions : ElemSize2SpecifiedActions) {
+        llvm::sort(VectorSpecifiedActions.second);
+        const uint16_t ElementSize = VectorSpecifiedActions.first;
+        ElementSizesSeen.push_back({ElementSize, Legal});
+        checkPartialSizeAndActionsVector(VectorSpecifiedActions.second);
+        // For vector types, we assume that the best way to adapt the number
+        // of elements is to the next larger number of elements type for which
+        // the vector type is legal, unless there is no such type. In that case,
+        // legalize towards a vector type with a smaller number of elements.
+        SizeAndActionsVec NumElementsActions;
+        for (SizeAndAction BitsizeAndAction : VectorSpecifiedActions.second) {
+          assert(BitsizeAndAction.first % ElementSize == 0);
+          const uint16_t NumElements = BitsizeAndAction.first / ElementSize;
+          NumElementsActions.push_back({NumElements, BitsizeAndAction.second});
+        }
+        setVectorNumElementAction(
+            Opcode, TypeIdx, ElementSize,
+            moreToWiderTypesAndLessToWidest(NumElementsActions));
+      }
+      llvm::sort(ElementSizesSeen);
+      SizeChangeStrategy VectorElementSizeChangeStrategy =
+          &unsupportedForDifferentSizes;
+      if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&
+          VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] != nullptr)
+        VectorElementSizeChangeStrategy =
+            VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx];
+      setScalarInVectorAction(
+          Opcode, TypeIdx, VectorElementSizeChangeStrategy(ElementSizesSeen));
+    }
+  }
+
+  TablesInitialized = true;
+}
+
+// FIXME: inefficient implementation for now. Without ComputeValueVTs we're
+// probably going to need specialized lookup structures for various types before
+// we have any hope of doing well with something like <13 x i3>. Even the common
+// cases should do better than what we have now.
+std::pair<LegalizeAction, LLT>
+LegalizerInfo::getAspectAction(const InstrAspect &Aspect) const {
+  assert(TablesInitialized && "backend forgot to call computeTables");
+  // These *have* to be implemented for now, they're the fundamental basis of
+  // how everything else is transformed.
+  if (Aspect.Type.isScalar() || Aspect.Type.isPointer())
+    return findScalarLegalAction(Aspect);
+  assert(Aspect.Type.isVector());
+  return findVectorLegalAction(Aspect);
+}
+
+/// Helper function to get LLT for the given type index.

+static LLT getTypeFromTypeIdx(const MachineInstr &MI, + const MachineRegisterInfo &MRI, unsigned OpIdx, + unsigned TypeIdx) { + assert(TypeIdx < MI.getNumOperands() && "Unexpected TypeIdx"); + // G_UNMERGE_VALUES has variable number of operands, but there is only + // one source type and one destination type as all destinations must be the + // same type. So, get the last operand if TypeIdx == 1. + if (MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && TypeIdx == 1) + return MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg()); + return MRI.getType(MI.getOperand(OpIdx).getReg()); +} + +unsigned LegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const { + assert(Opcode >= FirstOp && Opcode <= LastOp && "Unsupported opcode"); + return Opcode - FirstOp; +} + +unsigned LegalizerInfo::getActionDefinitionsIdx(unsigned Opcode) const { + unsigned OpcodeIdx = getOpcodeIdxForOpcode(Opcode); + if (unsigned Alias = RulesForOpcode[OpcodeIdx].getAlias()) { + LLVM_DEBUG(dbgs() << ".. opcode " << Opcode << " is aliased to " << Alias + << "\n"); + OpcodeIdx = getOpcodeIdxForOpcode(Alias); + assert(RulesForOpcode[OpcodeIdx].getAlias() == 0 && "Cannot chain aliases"); + } + + return OpcodeIdx; +} + +const LegalizeRuleSet & +LegalizerInfo::getActionDefinitions(unsigned Opcode) const { + unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); + return RulesForOpcode[OpcodeIdx]; +} + +LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(unsigned Opcode) { + unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); + auto &Result = RulesForOpcode[OpcodeIdx]; + assert(!Result.isAliasedByAnother() && "Modifying this opcode will modify aliases"); + return Result; +} + +LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder( + std::initializer_list<unsigned> Opcodes) { + unsigned Representative = *Opcodes.begin(); + + assert(!llvm::empty(Opcodes) && Opcodes.begin() + 1 != Opcodes.end() && + "Initializer list must have at least two opcodes"); + + for (auto I = Opcodes.begin() + 1, E = Opcodes.end(); I != E; ++I) + aliasActionDefinitions(Representative, *I); + + auto &Return = getActionDefinitionsBuilder(Representative); + Return.setIsAliasedByAnother(); + return Return; +} + +void LegalizerInfo::aliasActionDefinitions(unsigned OpcodeTo, + unsigned OpcodeFrom) { + assert(OpcodeTo != OpcodeFrom && "Cannot alias to self"); + assert(OpcodeTo >= FirstOp && OpcodeTo <= LastOp && "Unsupported opcode"); + const unsigned OpcodeFromIdx = getOpcodeIdxForOpcode(OpcodeFrom); + RulesForOpcode[OpcodeFromIdx].aliasTo(OpcodeTo); +} + +LegalizeActionStep +LegalizerInfo::getAction(const LegalityQuery &Query) const { + LegalizeActionStep Step = getActionDefinitions(Query.Opcode).apply(Query); + if (Step.Action != LegalizeAction::UseLegacyRules) { + return Step; + } + + for (unsigned i = 0; i < Query.Types.size(); ++i) { + auto Action = getAspectAction({Query.Opcode, i, Query.Types[i]}); + if (Action.first != Legal) { + LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Action=" + << Action.first << ", " << Action.second << "\n"); + return {Action.first, i, Action.second}; + } else + LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n"); + } + LLVM_DEBUG(dbgs() << ".. (legacy) Legal\n"); + return {Legal, 0, LLT{}}; +} + +LegalizeActionStep +LegalizerInfo::getAction(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + SmallVector<LLT, 2> Types; + SmallBitVector SeenTypes(8); + const MCOperandInfo *OpInfo = MI.getDesc().OpInfo; + // FIXME: probably we'll need to cache the results here somehow? 
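One possible shape for the caching this FIXME asks about, purely hypothetical and not present in LLVM: memoize results per opcode and type list, leaving memory-operand descriptors and invalidation aside:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

enum Action { Legal, NarrowScalar, Unsupported };
struct StepResult { Action A; unsigned TypeIdx; uint64_t NewTy; };

class ActionCache {
  // Key: opcode plus an encoding of the queried types. A real version would
  // also have to account for MMO descriptors and for table changes.
  std::map<std::pair<unsigned, std::vector<uint64_t>>, StepResult> Cache;

public:
  const StepResult *lookup(unsigned Opc,
                           const std::vector<uint64_t> &Tys) const {
    auto It = Cache.find({Opc, Tys});
    return It == Cache.end() ? nullptr : &It->second;
  }
  void remember(unsigned Opc, std::vector<uint64_t> Tys, StepResult R) {
    Cache.emplace(std::make_pair(Opc, std::move(Tys)), R);
  }
};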
+ for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) { + if (!OpInfo[i].isGenericType()) + continue; + + // We must only record actions once for each TypeIdx; otherwise we'd + // try to legalize operands multiple times down the line. + unsigned TypeIdx = OpInfo[i].getGenericTypeIndex(); + if (SeenTypes[TypeIdx]) + continue; + + SeenTypes.set(TypeIdx); + + LLT Ty = getTypeFromTypeIdx(MI, MRI, i, TypeIdx); + Types.push_back(Ty); + } + + SmallVector<LegalityQuery::MemDesc, 2> MemDescrs; + for (const auto &MMO : MI.memoperands()) + MemDescrs.push_back({8 * MMO->getSize() /* in bits */, + 8 * MMO->getAlign().value(), MMO->getOrdering()}); + + return getAction({MI.getOpcode(), Types, MemDescrs}); +} + +bool LegalizerInfo::isLegal(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + return getAction(MI, MRI).Action == Legal; +} + +bool LegalizerInfo::isLegalOrCustom(const MachineInstr &MI, + const MachineRegisterInfo &MRI) const { + auto Action = getAction(MI, MRI).Action; + // If the action is custom, it may not necessarily modify the instruction, + // so we have to assume it's legal. + return Action == Legal || Action == Custom; +} + +LegalizerInfo::SizeAndActionsVec +LegalizerInfo::increaseToLargerTypesAndDecreaseToLargest( + const SizeAndActionsVec &v, LegalizeAction IncreaseAction, + LegalizeAction DecreaseAction) { + SizeAndActionsVec result; + unsigned LargestSizeSoFar = 0; + if (v.size() >= 1 && v[0].first != 1) + result.push_back({1, IncreaseAction}); + for (size_t i = 0; i < v.size(); ++i) { + result.push_back(v[i]); + LargestSizeSoFar = v[i].first; + if (i + 1 < v.size() && v[i + 1].first != v[i].first + 1) { + result.push_back({LargestSizeSoFar + 1, IncreaseAction}); + LargestSizeSoFar = v[i].first + 1; + } + } + result.push_back({LargestSizeSoFar + 1, DecreaseAction}); + return result; +} + +LegalizerInfo::SizeAndActionsVec +LegalizerInfo::decreaseToSmallerTypesAndIncreaseToSmallest( + const SizeAndActionsVec &v, LegalizeAction DecreaseAction, + LegalizeAction IncreaseAction) { + SizeAndActionsVec result; + if (v.size() == 0 || v[0].first != 1) + result.push_back({1, IncreaseAction}); + for (size_t i = 0; i < v.size(); ++i) { + result.push_back(v[i]); + if (i + 1 == v.size() || v[i + 1].first != v[i].first + 1) { + result.push_back({v[i].first + 1, DecreaseAction}); + } + } + return result; +} + +LegalizerInfo::SizeAndAction +LegalizerInfo::findAction(const SizeAndActionsVec &Vec, const uint32_t Size) { + assert(Size >= 1); + // Find the last element in Vec that has a bitsize equal to or smaller than + // the requested bit size. + // That is the element just before the first element that is bigger than Size. + auto It = partition_point( + Vec, [=](const SizeAndAction &A) { return A.first <= Size; }); + assert(It != Vec.begin() && "Does Vec not start with size 1?"); + int VecIdx = It - Vec.begin() - 1; + + LegalizeAction Action = Vec[VecIdx].second; + switch (Action) { + case Legal: + case Bitcast: + case Lower: + case Libcall: + case Custom: + return {Size, Action}; + case FewerElements: + // FIXME: is this special case still needed and correct? + // Special case for scalarization: + if (Vec == SizeAndActionsVec({{1, FewerElements}})) + return {1, FewerElements}; + LLVM_FALLTHROUGH; + case NarrowScalar: { + // The following needs to be a loop, as for now, we do allow needing to + // go over "Unsupported" bit sizes before finding a legalizable bit size. + // e.g. (s8, WidenScalar), (s9, Unsupported), (s32, Legal). 
if Size==8, + // we need to iterate over s9, and then to s32 to return (s32, Legal). + // If we want to get rid of the below loop, we should have stronger asserts + // when building the SizeAndActionsVecs, probably not allowing + // "Unsupported" unless at the ends of the vector. + for (int i = VecIdx - 1; i >= 0; --i) + if (!needsLegalizingToDifferentSize(Vec[i].second) && + Vec[i].second != Unsupported) + return {Vec[i].first, Action}; + llvm_unreachable(""); + } + case WidenScalar: + case MoreElements: { + // See above, the following needs to be a loop, at least for now. + for (std::size_t i = VecIdx + 1; i < Vec.size(); ++i) + if (!needsLegalizingToDifferentSize(Vec[i].second) && + Vec[i].second != Unsupported) + return {Vec[i].first, Action}; + llvm_unreachable(""); + } + case Unsupported: + return {Size, Unsupported}; + case NotFound: + case UseLegacyRules: + llvm_unreachable("NotFound"); + } + llvm_unreachable("Action has an unknown enum value"); +} + +std::pair<LegalizeAction, LLT> +LegalizerInfo::findScalarLegalAction(const InstrAspect &Aspect) const { + assert(Aspect.Type.isScalar() || Aspect.Type.isPointer()); + if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) + return {NotFound, LLT()}; + const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); + if (Aspect.Type.isPointer() && + AddrSpace2PointerActions[OpcodeIdx].find(Aspect.Type.getAddressSpace()) == + AddrSpace2PointerActions[OpcodeIdx].end()) { + return {NotFound, LLT()}; + } + const SmallVector<SizeAndActionsVec, 1> &Actions = + Aspect.Type.isPointer() + ? AddrSpace2PointerActions[OpcodeIdx] + .find(Aspect.Type.getAddressSpace()) + ->second + : ScalarActions[OpcodeIdx]; + if (Aspect.Idx >= Actions.size()) + return {NotFound, LLT()}; + const SizeAndActionsVec &Vec = Actions[Aspect.Idx]; + // FIXME: speed up this search, e.g. by using a results cache for repeated + // queries? + auto SizeAndAction = findAction(Vec, Aspect.Type.getSizeInBits()); + return {SizeAndAction.second, + Aspect.Type.isScalar() ? LLT::scalar(SizeAndAction.first) + : LLT::pointer(Aspect.Type.getAddressSpace(), + SizeAndAction.first)}; +} + +std::pair<LegalizeAction, LLT> +LegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const { + assert(Aspect.Type.isVector()); + // First legalize the vector element size, then legalize the number of + // lanes in the vector. 
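The NumElements2Actions lookup used in the second phase below is a two-level structure: element size in bits on the outside, type index on the inside, with lane counts at the leaves. A minimal model, with illustrative values:

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

enum Action { Legal, MoreElements };
using SizeAndActionsVec = std::vector<std::pair<uint16_t, Action>>;

int main() {
  std::map<uint16_t, std::vector<SizeAndActionsVec>> NumElements2Actions;
  // For 32-bit elements, type index 0: 2 and 4 lanes legal, 3 padded up.
  NumElements2Actions[32] = {{{2, Legal}, {3, MoreElements}, {4, Legal}}};
  auto I = NumElements2Actions.find(32);
  assert(I != NumElements2Actions.end());
  const SizeAndActionsVec &Lanes = I->second[/*TypeIdx=*/0];
  assert(Lanes[1].second == MoreElements); // <3 x s32> -> add a lane
}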
+ if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) + return {NotFound, Aspect.Type}; + const unsigned OpcodeIdx = getOpcodeIdxForOpcode(Aspect.Opcode); + const unsigned TypeIdx = Aspect.Idx; + if (TypeIdx >= ScalarInVectorActions[OpcodeIdx].size()) + return {NotFound, Aspect.Type}; + const SizeAndActionsVec &ElemSizeVec = + ScalarInVectorActions[OpcodeIdx][TypeIdx]; + + LLT IntermediateType; + auto ElementSizeAndAction = + findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits()); + IntermediateType = + LLT::vector(Aspect.Type.getNumElements(), ElementSizeAndAction.first); + if (ElementSizeAndAction.second != Legal) + return {ElementSizeAndAction.second, IntermediateType}; + + auto i = NumElements2Actions[OpcodeIdx].find( + IntermediateType.getScalarSizeInBits()); + if (i == NumElements2Actions[OpcodeIdx].end()) { + return {NotFound, IntermediateType}; + } + const SizeAndActionsVec &NumElementsVec = (*i).second[TypeIdx]; + auto NumElementsAndAction = + findAction(NumElementsVec, IntermediateType.getNumElements()); + return {NumElementsAndAction.second, + LLT::vector(NumElementsAndAction.first, + IntermediateType.getScalarSizeInBits())}; +} + +unsigned LegalizerInfo::getExtOpcodeForWideningConstant(LLT SmallTy) const { + return SmallTy.isByteSized() ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; +} + +/// \pre Type indices of every opcode form a dense set starting from 0. +void LegalizerInfo::verify(const MCInstrInfo &MII) const { +#ifndef NDEBUG + std::vector<unsigned> FailedOpcodes; + for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) { + const MCInstrDesc &MCID = MII.get(Opcode); + const unsigned NumTypeIdxs = std::accumulate( + MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + [](unsigned Acc, const MCOperandInfo &OpInfo) { + return OpInfo.isGenericType() + ? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc) + : Acc; + }); + const unsigned NumImmIdxs = std::accumulate( + MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + [](unsigned Acc, const MCOperandInfo &OpInfo) { + return OpInfo.isGenericImm() + ? std::max(OpInfo.getGenericImmIndex() + 1U, Acc) + : Acc; + }); + LLVM_DEBUG(dbgs() << MII.getName(Opcode) << " (opcode " << Opcode + << "): " << NumTypeIdxs << " type ind" + << (NumTypeIdxs == 1 ? "ex" : "ices") << ", " + << NumImmIdxs << " imm ind" + << (NumImmIdxs == 1 ? "ex" : "ices") << "\n"); + const LegalizeRuleSet &RuleSet = getActionDefinitions(Opcode); + if (!RuleSet.verifyTypeIdxsCoverage(NumTypeIdxs)) + FailedOpcodes.push_back(Opcode); + else if (!RuleSet.verifyImmIdxsCoverage(NumImmIdxs)) + FailedOpcodes.push_back(Opcode); + } + if (!FailedOpcodes.empty()) { + errs() << "The following opcodes have ill-defined legalization rules:"; + for (unsigned Opcode : FailedOpcodes) + errs() << " " << MII.getName(Opcode); + errs() << "\n"; + + report_fatal_error("ill-defined LegalizerInfo" + ", try -debug-only=legalizer-info for details"); + } +#endif +} + +#ifndef NDEBUG +// FIXME: This should be in the MachineVerifier, but it can't use the +// LegalizerInfo as it's currently in the separate GlobalISel library. +// Note that RegBankSelected property already checked in the verifier +// has the same layering problem, but we only use inline methods so +// end up not needing to link against the GlobalISel library. 
+const MachineInstr *llvm::machineFunctionIsIllegal(const MachineFunction &MF) { + if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo()) { + const MachineRegisterInfo &MRI = MF.getRegInfo(); + for (const MachineBasicBlock &MBB : MF) + for (const MachineInstr &MI : MBB) + if (isPreISelGenericOpcode(MI.getOpcode()) && + !MLI->isLegalOrCustom(MI, MRI)) + return &MI; + } + return nullptr; +} +#endif diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Localizer.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Localizer.cpp index 3ca927bee1..30c00c63f6 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Localizer.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Localizer.cpp @@ -1,62 +1,62 @@ -//===- Localizer.cpp ---------------------- Localize some instrs -*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the Localizer class. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/Localizer.h" -#include "llvm/ADT/DenseMap.h" +//===- Localizer.cpp ---------------------- Localize some instrs -*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the Localizer class. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/Localizer.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/InitializePasses.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "localizer" - -using namespace llvm; - -char Localizer::ID = 0; -INITIALIZE_PASS_BEGIN(Localizer, DEBUG_TYPE, - "Move/duplicate certain instructions close to their use", - false, false) -INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) -INITIALIZE_PASS_END(Localizer, DEBUG_TYPE, - "Move/duplicate certain instructions close to their use", - false, false) - -Localizer::Localizer(std::function<bool(const MachineFunction &)> F) - : MachineFunctionPass(ID), DoNotRunPass(F) {} - -Localizer::Localizer() - : Localizer([](const MachineFunction &) { return false; }) {} - -void Localizer::init(MachineFunction &MF) { - MRI = &MF.getRegInfo(); - TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(MF.getFunction()); -} - -void Localizer::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<TargetTransformInfoWrapperPass>(); - getSelectionDAGFallbackAnalysisUsage(AU); - MachineFunctionPass::getAnalysisUsage(AU); -} - -bool Localizer::isLocalUse(MachineOperand &MOUse, const MachineInstr &Def, - MachineBasicBlock *&InsertMBB) { - MachineInstr &MIUse = *MOUse.getParent(); - InsertMBB = MIUse.getParent(); - if (MIUse.isPHI()) - InsertMBB = MIUse.getOperand(MIUse.getOperandNo(&MOUse) + 1).getMBB(); - return InsertMBB == Def.getParent(); -} - +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include 
"llvm/CodeGen/TargetLowering.h" +#include "llvm/InitializePasses.h" +#include "llvm/Support/Debug.h" + +#define DEBUG_TYPE "localizer" + +using namespace llvm; + +char Localizer::ID = 0; +INITIALIZE_PASS_BEGIN(Localizer, DEBUG_TYPE, + "Move/duplicate certain instructions close to their use", + false, false) +INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) +INITIALIZE_PASS_END(Localizer, DEBUG_TYPE, + "Move/duplicate certain instructions close to their use", + false, false) + +Localizer::Localizer(std::function<bool(const MachineFunction &)> F) + : MachineFunctionPass(ID), DoNotRunPass(F) {} + +Localizer::Localizer() + : Localizer([](const MachineFunction &) { return false; }) {} + +void Localizer::init(MachineFunction &MF) { + MRI = &MF.getRegInfo(); + TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(MF.getFunction()); +} + +void Localizer::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<TargetTransformInfoWrapperPass>(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + +bool Localizer::isLocalUse(MachineOperand &MOUse, const MachineInstr &Def, + MachineBasicBlock *&InsertMBB) { + MachineInstr &MIUse = *MOUse.getParent(); + InsertMBB = MIUse.getParent(); + if (MIUse.isPHI()) + InsertMBB = MIUse.getOperand(MIUse.getOperandNo(&MOUse) + 1).getMBB(); + return InsertMBB == Def.getParent(); +} + bool Localizer::isNonUniquePhiValue(MachineOperand &Op) const { MachineInstr *MI = Op.getParent(); if (!MI->isPHI()) @@ -71,43 +71,43 @@ bool Localizer::isNonUniquePhiValue(MachineOperand &Op) const { return false; } -bool Localizer::localizeInterBlock(MachineFunction &MF, - LocalizedSetVecT &LocalizedInstrs) { - bool Changed = false; - DenseMap<std::pair<MachineBasicBlock *, unsigned>, unsigned> MBBWithLocalDef; - - // Since the IRTranslator only emits constants into the entry block, and the - // rest of the GISel pipeline generally emits constants close to their users, - // we only localize instructions in the entry block here. This might change if - // we start doing CSE across blocks. - auto &MBB = MF.front(); - auto &TL = *MF.getSubtarget().getTargetLowering(); - for (auto RI = MBB.rbegin(), RE = MBB.rend(); RI != RE; ++RI) { - MachineInstr &MI = *RI; - if (!TL.shouldLocalize(MI, TTI)) - continue; - LLVM_DEBUG(dbgs() << "Should localize: " << MI); - assert(MI.getDesc().getNumDefs() == 1 && - "More than one definition not supported yet"); - Register Reg = MI.getOperand(0).getReg(); - // Check if all the users of MI are local. - // We are going to invalidation the list of use operands, so we - // can't use range iterator. - for (auto MOIt = MRI->use_begin(Reg), MOItEnd = MRI->use_end(); - MOIt != MOItEnd;) { - MachineOperand &MOUse = *MOIt++; - // Check if the use is already local. - MachineBasicBlock *InsertMBB; - LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent(); - dbgs() << "Checking use: " << MIUse - << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n'); - if (isLocalUse(MOUse, MI, InsertMBB)) { - // Even if we're in the same block, if the block is very large we could - // still have many long live ranges. Try to do intra-block localization - // too. 
-        LocalizedInstrs.insert(&MI);
-        continue;
-      }
+bool Localizer::localizeInterBlock(MachineFunction &MF,
+                                   LocalizedSetVecT &LocalizedInstrs) {
+  bool Changed = false;
+  DenseMap<std::pair<MachineBasicBlock *, unsigned>, unsigned> MBBWithLocalDef;
+
+  // Since the IRTranslator only emits constants into the entry block, and the
+  // rest of the GISel pipeline generally emits constants close to their users,
+  // we only localize instructions in the entry block here. This might change if
+  // we start doing CSE across blocks.
+  auto &MBB = MF.front();
+  auto &TL = *MF.getSubtarget().getTargetLowering();
+  for (auto RI = MBB.rbegin(), RE = MBB.rend(); RI != RE; ++RI) {
+    MachineInstr &MI = *RI;
+    if (!TL.shouldLocalize(MI, TTI))
+      continue;
+    LLVM_DEBUG(dbgs() << "Should localize: " << MI);
+    assert(MI.getDesc().getNumDefs() == 1 &&
+           "More than one definition not supported yet");
+    Register Reg = MI.getOperand(0).getReg();
+    // Check if all the users of MI are local.
+    // We are going to invalidate the list of use operands, so we
+    // can't use range iterator.
+    for (auto MOIt = MRI->use_begin(Reg), MOItEnd = MRI->use_end();
+         MOIt != MOItEnd;) {
+      MachineOperand &MOUse = *MOIt++;
+      // Check if the use is already local.
+      MachineBasicBlock *InsertMBB;
+      LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent();
+                 dbgs() << "Checking use: " << MIUse
+                        << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
+      if (isLocalUse(MOUse, MI, InsertMBB)) {
+        // Even if we're in the same block, if the block is very large we could
+        // still have many long live ranges. Try to do intra-block localization
+        // too.
+        LocalizedInstrs.insert(&MI);
+        continue;
+      }

       // If the use is a phi operand that's not unique, don't try to localize.
       // If we do, we can cause unnecessary instruction bloat by duplicating
@@ -116,95 +116,95 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
       if (isNonUniquePhiValue(MOUse))
         continue;

-      LLVM_DEBUG(dbgs() << "Fixing non-local use\n");
-      Changed = true;
-      auto MBBAndReg = std::make_pair(InsertMBB, Reg);
-      auto NewVRegIt = MBBWithLocalDef.find(MBBAndReg);
-      if (NewVRegIt == MBBWithLocalDef.end()) {
-        // Create the localized instruction.
-        MachineInstr *LocalizedMI = MF.CloneMachineInstr(&MI);
-        LocalizedInstrs.insert(LocalizedMI);
-        MachineInstr &UseMI = *MOUse.getParent();
-        if (MRI->hasOneUse(Reg) && !UseMI.isPHI())
-          InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI);
-        else
-          InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(InsertMBB->begin()),
-                            LocalizedMI);
-
-        // Set a new register for the definition.
-        Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
-        MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg));
-        LocalizedMI->getOperand(0).setReg(NewReg);
-        NewVRegIt =
-            MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first;
-        LLVM_DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
-      }
-      LLVM_DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
-                        << '\n');
-      // Update the user reg.
-      MOUse.setReg(NewVRegIt->second);
-    }
-  }
-  return Changed;
-}
-
-bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) {
-  bool Changed = false;
-
-  // For each already-localized instruction which has multiple users, we
-  // scan the block top down from the current position until we hit one of them.
-
-  // FIXME: Consider doing inst duplication if live ranges are very long due to
-  // many users, but this case may be better served by regalloc improvements.
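The loop that follows implements the scan described in this comment. A standalone model of the move, using a std::list as a stand-in for the instruction stream:

#include <cassert>
#include <iterator>
#include <list>
#include <set>
#include <string>

int main() {
  std::list<std::string> Block = {"def", "a", "b", "use1", "use2"};
  std::set<std::string> Users = {"use1", "use2"};
  auto Def = Block.begin();
  auto It = std::next(Def);
  // Walk forward from the def until we hit the first user.
  while (It != Block.end() && !Users.count(*It))
    ++It;
  assert(It != Block.end() && "didn't find a user in the block");
  Block.splice(It, Block, Def); // move "def" directly before "use1"
  assert((Block == std::list<std::string>{"a", "b", "def", "use1", "use2"}));
}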
- - for (MachineInstr *MI : LocalizedInstrs) { - Register Reg = MI->getOperand(0).getReg(); - MachineBasicBlock &MBB = *MI->getParent(); - // All of the user MIs of this reg. - SmallPtrSet<MachineInstr *, 32> Users; - for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) { - if (!UseMI.isPHI()) - Users.insert(&UseMI); - } - // If all the users were PHIs then they're not going to be in our block, - // don't try to move this instruction. - if (Users.empty()) - continue; - - MachineBasicBlock::iterator II(MI); - ++II; - while (II != MBB.end() && !Users.count(&*II)) - ++II; - - LLVM_DEBUG(dbgs() << "Intra-block: moving " << *MI << " before " << *&*II - << "\n"); - assert(II != MBB.end() && "Didn't find the user in the MBB"); - MI->removeFromParent(); - MBB.insert(II, MI); - Changed = true; - } - return Changed; -} - -bool Localizer::runOnMachineFunction(MachineFunction &MF) { - // If the ISel pipeline failed, do not bother running that pass. - if (MF.getProperties().hasProperty( - MachineFunctionProperties::Property::FailedISel)) - return false; - - // Don't run the pass if the target asked so. - if (DoNotRunPass(MF)) - return false; - - LLVM_DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n'); - - init(MF); - - // Keep track of the instructions we localized. We'll do a second pass of - // intra-block localization to further reduce live ranges. - LocalizedSetVecT LocalizedInstrs; - - bool Changed = localizeInterBlock(MF, LocalizedInstrs); - Changed |= localizeIntraBlock(LocalizedInstrs); - return Changed; -} + LLVM_DEBUG(dbgs() << "Fixing non-local use\n"); + Changed = true; + auto MBBAndReg = std::make_pair(InsertMBB, Reg); + auto NewVRegIt = MBBWithLocalDef.find(MBBAndReg); + if (NewVRegIt == MBBWithLocalDef.end()) { + // Create the localized instruction. + MachineInstr *LocalizedMI = MF.CloneMachineInstr(&MI); + LocalizedInstrs.insert(LocalizedMI); + MachineInstr &UseMI = *MOUse.getParent(); + if (MRI->hasOneUse(Reg) && !UseMI.isPHI()) + InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI); + else + InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(InsertMBB->begin()), + LocalizedMI); + + // Set a new register for the definition. + Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg)); + MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg)); + LocalizedMI->getOperand(0).setReg(NewReg); + NewVRegIt = + MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first; + LLVM_DEBUG(dbgs() << "Inserted: " << *LocalizedMI); + } + LLVM_DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second) + << '\n'); + // Update the user reg. + MOUse.setReg(NewVRegIt->second); + } + } + return Changed; +} + +bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) { + bool Changed = false; + + // For each already-localized instruction which has multiple users, then we + // scan the block top down from the current position until we hit one of them. + + // FIXME: Consider doing inst duplication if live ranges are very long due to + // many users, but this case may be better served by regalloc improvements. + + for (MachineInstr *MI : LocalizedInstrs) { + Register Reg = MI->getOperand(0).getReg(); + MachineBasicBlock &MBB = *MI->getParent(); + // All of the user MIs of this reg. 
+ SmallPtrSet<MachineInstr *, 32> Users; + for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) { + if (!UseMI.isPHI()) + Users.insert(&UseMI); + } + // If all the users were PHIs then they're not going to be in our block, + // don't try to move this instruction. + if (Users.empty()) + continue; + + MachineBasicBlock::iterator II(MI); + ++II; + while (II != MBB.end() && !Users.count(&*II)) + ++II; + + LLVM_DEBUG(dbgs() << "Intra-block: moving " << *MI << " before " << *&*II + << "\n"); + assert(II != MBB.end() && "Didn't find the user in the MBB"); + MI->removeFromParent(); + MBB.insert(II, MI); + Changed = true; + } + return Changed; +} + +bool Localizer::runOnMachineFunction(MachineFunction &MF) { + // If the ISel pipeline failed, do not bother running that pass. + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + + // Don't run the pass if the target asked so. + if (DoNotRunPass(MF)) + return false; + + LLVM_DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n'); + + init(MF); + + // Keep track of the instructions we localized. We'll do a second pass of + // intra-block localization to further reduce live ranges. + LocalizedSetVecT LocalizedInstrs; + + bool Changed = localizeInterBlock(MF, LocalizedInstrs); + Changed |= localizeIntraBlock(LocalizedInstrs); + return Changed; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp index 50e1d10c9a..6d606e5550 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp @@ -1,113 +1,113 @@ -//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.cpp -----*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -/// Tracks DebugLocs between checkpoints and verifies that they are transferred. -// -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h" - -using namespace llvm; - -#define LOC_DEBUG(X) DEBUG_WITH_TYPE(DebugType.str().c_str(), X) - -void LostDebugLocObserver::analyzeDebugLocations() { - if (LostDebugLocs.empty()) { - LOC_DEBUG(dbgs() << ".. No debug info was present\n"); - return; - } - if (PotentialMIsForDebugLocs.empty()) { - LOC_DEBUG( - dbgs() << ".. No instructions to carry debug info (dead code?)\n"); - return; - } - - LOC_DEBUG(dbgs() << ".. Searching " << PotentialMIsForDebugLocs.size() - << " instrs for " << LostDebugLocs.size() << " locations\n"); - SmallPtrSet<MachineInstr *, 4> FoundIn; - for (MachineInstr *MI : PotentialMIsForDebugLocs) { - if (!MI->getDebugLoc()) - continue; - // Check this first in case there's a matching line-0 location on both input - // and output. - if (MI->getDebugLoc().getLine() == 0) { - LOC_DEBUG( - dbgs() << ".. Assuming line-0 location covers remainder (if any)\n"); - return; - } - if (LostDebugLocs.erase(MI->getDebugLoc())) { - LOC_DEBUG(dbgs() << ".. .. found " << MI->getDebugLoc() << " in " << *MI); - FoundIn.insert(MI); - continue; - } - } - if (LostDebugLocs.empty()) - return; - - NumLostDebugLocs += LostDebugLocs.size(); - LOC_DEBUG({ - dbgs() << ".. 
Lost locations:\n"; - for (const DebugLoc &Loc : LostDebugLocs) { - dbgs() << ".. .. "; - Loc.print(dbgs()); - dbgs() << "\n"; - } - dbgs() << ".. MIs with matched locations:\n"; - for (MachineInstr *MI : FoundIn) - if (PotentialMIsForDebugLocs.erase(MI)) - dbgs() << ".. .. " << *MI; - dbgs() << ".. Remaining MIs with unmatched/no locations:\n"; - for (const MachineInstr *MI : PotentialMIsForDebugLocs) - dbgs() << ".. .. " << *MI; - }); -} - -void LostDebugLocObserver::checkpoint(bool CheckDebugLocs) { - if (CheckDebugLocs) - analyzeDebugLocations(); - PotentialMIsForDebugLocs.clear(); - LostDebugLocs.clear(); -} - -void LostDebugLocObserver::createdInstr(MachineInstr &MI) { - PotentialMIsForDebugLocs.insert(&MI); -} - -static bool irTranslatorNeverAddsLocations(unsigned Opcode) { - switch (Opcode) { - default: - return false; - case TargetOpcode::G_CONSTANT: - case TargetOpcode::G_FCONSTANT: - case TargetOpcode::G_IMPLICIT_DEF: - case TargetOpcode::G_GLOBAL_VALUE: - return true; - } -} - -void LostDebugLocObserver::erasingInstr(MachineInstr &MI) { - if (irTranslatorNeverAddsLocations(MI.getOpcode())) - return; - - PotentialMIsForDebugLocs.erase(&MI); - if (MI.getDebugLoc()) - LostDebugLocs.insert(MI.getDebugLoc()); -} - -void LostDebugLocObserver::changingInstr(MachineInstr &MI) { - if (irTranslatorNeverAddsLocations(MI.getOpcode())) - return; - - PotentialMIsForDebugLocs.erase(&MI); - if (MI.getDebugLoc()) - LostDebugLocs.insert(MI.getDebugLoc()); -} - -void LostDebugLocObserver::changedInstr(MachineInstr &MI) { - PotentialMIsForDebugLocs.insert(&MI); -} +//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.cpp -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// Tracks DebugLocs between checkpoints and verifies that they are transferred. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h" + +using namespace llvm; + +#define LOC_DEBUG(X) DEBUG_WITH_TYPE(DebugType.str().c_str(), X) + +void LostDebugLocObserver::analyzeDebugLocations() { + if (LostDebugLocs.empty()) { + LOC_DEBUG(dbgs() << ".. No debug info was present\n"); + return; + } + if (PotentialMIsForDebugLocs.empty()) { + LOC_DEBUG( + dbgs() << ".. No instructions to carry debug info (dead code?)\n"); + return; + } + + LOC_DEBUG(dbgs() << ".. Searching " << PotentialMIsForDebugLocs.size() + << " instrs for " << LostDebugLocs.size() << " locations\n"); + SmallPtrSet<MachineInstr *, 4> FoundIn; + for (MachineInstr *MI : PotentialMIsForDebugLocs) { + if (!MI->getDebugLoc()) + continue; + // Check this first in case there's a matching line-0 location on both input + // and output. + if (MI->getDebugLoc().getLine() == 0) { + LOC_DEBUG( + dbgs() << ".. Assuming line-0 location covers remainder (if any)\n"); + return; + } + if (LostDebugLocs.erase(MI->getDebugLoc())) { + LOC_DEBUG(dbgs() << ".. .. found " << MI->getDebugLoc() << " in " << *MI); + FoundIn.insert(MI); + continue; + } + } + if (LostDebugLocs.empty()) + return; + + NumLostDebugLocs += LostDebugLocs.size(); + LOC_DEBUG({ + dbgs() << ".. Lost locations:\n"; + for (const DebugLoc &Loc : LostDebugLocs) { + dbgs() << ".. .. "; + Loc.print(dbgs()); + dbgs() << "\n"; + } + dbgs() << ".. 
MIs with matched locations:\n";
+    for (MachineInstr *MI : FoundIn)
+      if (PotentialMIsForDebugLocs.erase(MI))
+        dbgs() << ".. .. " << *MI;
+    dbgs() << ".. Remaining MIs with unmatched/no locations:\n";
+    for (const MachineInstr *MI : PotentialMIsForDebugLocs)
+      dbgs() << ".. .. " << *MI;
+  });
+}
+
+void LostDebugLocObserver::checkpoint(bool CheckDebugLocs) {
+  if (CheckDebugLocs)
+    analyzeDebugLocations();
+  PotentialMIsForDebugLocs.clear();
+  LostDebugLocs.clear();
+}
+
+void LostDebugLocObserver::createdInstr(MachineInstr &MI) {
+  PotentialMIsForDebugLocs.insert(&MI);
+}
+
+static bool irTranslatorNeverAddsLocations(unsigned Opcode) {
+  switch (Opcode) {
+  default:
+    return false;
+  case TargetOpcode::G_CONSTANT:
+  case TargetOpcode::G_FCONSTANT:
+  case TargetOpcode::G_IMPLICIT_DEF:
+  case TargetOpcode::G_GLOBAL_VALUE:
+    return true;
+  }
+}
+
+void LostDebugLocObserver::erasingInstr(MachineInstr &MI) {
+  if (irTranslatorNeverAddsLocations(MI.getOpcode()))
+    return;
+
+  PotentialMIsForDebugLocs.erase(&MI);
+  if (MI.getDebugLoc())
+    LostDebugLocs.insert(MI.getDebugLoc());
+}
+
+void LostDebugLocObserver::changingInstr(MachineInstr &MI) {
+  if (irTranslatorNeverAddsLocations(MI.getOpcode()))
+    return;
+
+  PotentialMIsForDebugLocs.erase(&MI);
+  if (MI.getDebugLoc())
+    LostDebugLocs.insert(MI.getDebugLoc());
+}
+
+void LostDebugLocObserver::changedInstr(MachineInstr &MI) {
+  PotentialMIsForDebugLocs.insert(&MI);
+}
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index d8dd3ca281..67ef02a4e7 100644
--- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1,332 +1,332 @@
-//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements the MachineIRBuilder class.
-//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the MachineIRBuilder class.
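// [Editor's note - illustrative sketch, not part of this patch] Typical
// MachineIRBuilder usage, for readers skimming this file's diff: bind the
// builder to a function, pick an insertion point, then emit generic MIR.
// The surrounding MF/MBB are assumed to exist already; the opcodes shown
// are the standard GlobalISel generic opcodes built by this file:
//
//   MachineIRBuilder B;
//   B.setMF(MF);                       // bind MRI/TII/DebugLoc state
//   B.setInsertPt(MBB, MBB.begin());   // where new instructions land
//   LLT S32 = LLT::scalar(32);
//   auto C1 = B.buildConstant(S32, 1); // G_CONSTANT i32 1
//   auto C2 = B.buildConstant(S32, 2); // G_CONSTANT i32 2
//   B.buildAdd(S32, C1, C2);           // G_ADD of the two constants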
+//===----------------------------------------------------------------------===// +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/Analysis/MemoryLocation.h" -#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetInstrInfo.h" -#include "llvm/CodeGen/TargetLowering.h" -#include "llvm/CodeGen/TargetOpcodes.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/IR/DebugInfo.h" - -using namespace llvm; - -void MachineIRBuilder::setMF(MachineFunction &MF) { - State.MF = &MF; - State.MBB = nullptr; - State.MRI = &MF.getRegInfo(); - State.TII = MF.getSubtarget().getInstrInfo(); - State.DL = DebugLoc(); - State.II = MachineBasicBlock::iterator(); - State.Observer = nullptr; -} - -//------------------------------------------------------------------------------ -// Build instruction variants. -//------------------------------------------------------------------------------ - -MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { - MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode)); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { - getMBB().insert(getInsertPt(), MIB); - recordInsertion(MIB); - return MIB; -} - -MachineInstrBuilder -MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, - const MDNode *Expr) { - assert(isa<DILocalVariable>(Variable) && "not a variable"); - assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); - assert( - cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && - "Expected inlined-at fields to agree"); - return insertInstr(BuildMI(getMF(), getDL(), - getTII().get(TargetOpcode::DBG_VALUE), - /*IsIndirect*/ false, Reg, Variable, Expr)); -} - -MachineInstrBuilder -MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, - const MDNode *Expr) { - assert(isa<DILocalVariable>(Variable) && "not a variable"); - assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); - assert( - cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && - "Expected inlined-at fields to agree"); - return insertInstr(BuildMI(getMF(), getDL(), - getTII().get(TargetOpcode::DBG_VALUE), - /*IsIndirect*/ true, Reg, Variable, Expr)); -} - -MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, - const MDNode *Variable, - const MDNode *Expr) { - assert(isa<DILocalVariable>(Variable) && "not a variable"); - assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); - assert( - cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && - "Expected inlined-at fields to agree"); - return buildInstr(TargetOpcode::DBG_VALUE) - .addFrameIndex(FI) - .addImm(0) - .addMetadata(Variable) - .addMetadata(Expr); -} - -MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, - const MDNode *Variable, - const MDNode *Expr) { - assert(isa<DILocalVariable>(Variable) && "not a variable"); - assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); - assert( - cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && - "Expected inlined-at fields to agree"); - auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE); - if (auto *CI = dyn_cast<ConstantInt>(&C)) { - if (CI->getBitWidth() > 64) - MIB.addCImm(CI); - else - 
MIB.addImm(CI->getZExtValue()); - } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) { - MIB.addFPImm(CFP); - } else { +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/DebugInfo.h" + +using namespace llvm; + +void MachineIRBuilder::setMF(MachineFunction &MF) { + State.MF = &MF; + State.MBB = nullptr; + State.MRI = &MF.getRegInfo(); + State.TII = MF.getSubtarget().getInstrInfo(); + State.DL = DebugLoc(); + State.II = MachineBasicBlock::iterator(); + State.Observer = nullptr; +} + +//------------------------------------------------------------------------------ +// Build instruction variants. +//------------------------------------------------------------------------------ + +MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) { + MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode)); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) { + getMBB().insert(getInsertPt(), MIB); + recordInsertion(MIB); + return MIB; +} + +MachineInstrBuilder +MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + return insertInstr(BuildMI(getMF(), getDL(), + getTII().get(TargetOpcode::DBG_VALUE), + /*IsIndirect*/ false, Reg, Variable, Expr)); +} + +MachineInstrBuilder +MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + return insertInstr(BuildMI(getMF(), getDL(), + getTII().get(TargetOpcode::DBG_VALUE), + /*IsIndirect*/ true, Reg, Variable, Expr)); +} + +MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, + const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + return buildInstr(TargetOpcode::DBG_VALUE) + .addFrameIndex(FI) + .addImm(0) + .addMetadata(Variable) + .addMetadata(Expr); +} + +MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, + const MDNode *Variable, + const MDNode *Expr) { + assert(isa<DILocalVariable>(Variable) && "not a variable"); + assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); + assert( + cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) && + "Expected inlined-at fields to agree"); + auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE); + if (auto *CI = dyn_cast<ConstantInt>(&C)) { + if (CI->getBitWidth() > 64) + MIB.addCImm(CI); + else + MIB.addImm(CI->getZExtValue()); + } else if (auto *CFP = 
dyn_cast<ConstantFP>(&C)) { + MIB.addFPImm(CFP); + } else { // Insert $noreg if we didn't find a usable constant and had to drop it. MIB.addReg(Register()); - } - - MIB.addImm(0).addMetadata(Variable).addMetadata(Expr); - return insertInstr(MIB); -} - -MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) { - assert(isa<DILabel>(Label) && "not a label"); - assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) && - "Expected inlined-at fields to agree"); - auto MIB = buildInstr(TargetOpcode::DBG_LABEL); - - return MIB.addMetadata(Label); -} - -MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res, - const SrcOp &Size, - Align Alignment) { - assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type"); - auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC); - Res.addDefToMIB(*getMRI(), MIB); - Size.addSrcToMIB(MIB); - MIB.addImm(Alignment.value()); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res, - int Idx) { - assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); - auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX); - Res.addDefToMIB(*getMRI(), MIB); - MIB.addFrameIndex(Idx); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res, - const GlobalValue *GV) { - assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); - assert(Res.getLLTTy(*getMRI()).getAddressSpace() == - GV->getType()->getAddressSpace() && - "address space mismatch"); - - auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE); - Res.addDefToMIB(*getMRI(), MIB); - MIB.addGlobalAddress(GV); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy, - unsigned JTI) { - return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {}) - .addJumpTableIndex(JTI); -} - + } + + MIB.addImm(0).addMetadata(Variable).addMetadata(Expr); + return insertInstr(MIB); +} + +MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) { + assert(isa<DILabel>(Label) && "not a label"); + assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) && + "Expected inlined-at fields to agree"); + auto MIB = buildInstr(TargetOpcode::DBG_LABEL); + + return MIB.addMetadata(Label); +} + +MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res, + const SrcOp &Size, + Align Alignment) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type"); + auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC); + Res.addDefToMIB(*getMRI(), MIB); + Size.addSrcToMIB(MIB); + MIB.addImm(Alignment.value()); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res, + int Idx) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX); + Res.addDefToMIB(*getMRI(), MIB); + MIB.addFrameIndex(Idx); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res, + const GlobalValue *GV) { + assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + assert(Res.getLLTTy(*getMRI()).getAddressSpace() == + GV->getType()->getAddressSpace() && + "address space mismatch"); + + auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE); + Res.addDefToMIB(*getMRI(), MIB); + MIB.addGlobalAddress(GV); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy, + unsigned JTI) { + return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {}) + .addJumpTableIndex(JTI); +} + void 
MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) { assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); assert((Res == Op0) && "type mismatch"); } -void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0, - const LLT Op1) { - assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); - assert((Res == Op0 && Res == Op1) && "type mismatch"); -} - -void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0, - const LLT Op1) { - assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); - assert((Res == Op0) && "type mismatch"); -} - -MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, - const SrcOp &Op0, - const SrcOp &Op1) { - assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() && - Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); - assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type"); - - return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); -} - -Optional<MachineInstrBuilder> -MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, - const LLT ValueTy, uint64_t Value) { - assert(Res == 0 && "Res is a result argument"); - assert(ValueTy.isScalar() && "invalid offset type"); - - if (Value == 0) { - Res = Op0; - return None; - } - - Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); - auto Cst = buildConstant(ValueTy, Value); - return buildPtrAdd(Res, Op0, Cst.getReg(0)); -} - -MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res, - const SrcOp &Op0, - uint32_t NumBits) { - LLT PtrTy = Res.getLLTTy(*getMRI()); - LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits()); - Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy); - buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits)); - return buildPtrMask(Res, Op0, MaskReg); -} - -MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) { - return buildInstr(TargetOpcode::G_BR).addMBB(&Dest); -} - -MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) { - assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination"); - return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); -} - -MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, - unsigned JTI, - Register IndexReg) { - assert(getMRI()->getType(TablePtr).isPointer() && - "Table reg must be a pointer"); - return buildInstr(TargetOpcode::G_BRJT) - .addUse(TablePtr) - .addJumpTableIndex(JTI) - .addUse(IndexReg); -} - -MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, - const SrcOp &Op) { - return buildInstr(TargetOpcode::COPY, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, - const ConstantInt &Val) { - LLT Ty = Res.getLLTTy(*getMRI()); - LLT EltTy = Ty.getScalarType(); - assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && - "creating constant with the wrong size"); - - if (Ty.isVector()) { - auto Const = buildInstr(TargetOpcode::G_CONSTANT) - .addDef(getMRI()->createGenericVirtualRegister(EltTy)) - .addCImm(&Val); - return buildSplatVector(Res, Const); - } - - auto Const = buildInstr(TargetOpcode::G_CONSTANT); - Const->setDebugLoc(DebugLoc()); - Res.addDefToMIB(*getMRI(), Const); - Const.addCImm(&Val); - return Const; -} - -MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, - int64_t Val) { - auto IntN = IntegerType::get(getMF().getFunction().getContext(), - Res.getLLTTy(*getMRI()).getScalarSizeInBits()); - ConstantInt *CI = 
ConstantInt::get(IntN, Val, true); - return buildConstant(Res, *CI); -} - -MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, - const ConstantFP &Val) { - LLT Ty = Res.getLLTTy(*getMRI()); - LLT EltTy = Ty.getScalarType(); - - assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) - == EltTy.getSizeInBits() && - "creating fconstant with the wrong size"); - - assert(!Ty.isPointer() && "invalid operand type"); - - if (Ty.isVector()) { - auto Const = buildInstr(TargetOpcode::G_FCONSTANT) - .addDef(getMRI()->createGenericVirtualRegister(EltTy)) - .addFPImm(&Val); - - return buildSplatVector(Res, Const); - } - - auto Const = buildInstr(TargetOpcode::G_FCONSTANT); - Const->setDebugLoc(DebugLoc()); - Res.addDefToMIB(*getMRI(), Const); - Const.addFPImm(&Val); - return Const; -} - -MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, - const APInt &Val) { - ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); - return buildConstant(Res, *CI); -} - -MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, - double Val) { - LLT DstTy = Res.getLLTTy(*getMRI()); - auto &Ctx = getMF().getFunction().getContext(); - auto *CFP = - ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); - return buildFConstant(Res, *CFP); -} - -MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, - const APFloat &Val) { - auto &Ctx = getMF().getFunction().getContext(); - auto *CFP = ConstantFP::get(Ctx, Val); - return buildFConstant(Res, *CFP); -} - +void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0, + const LLT Op1) { + assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); + assert((Res == Op0 && Res == Op1) && "type mismatch"); +} + +void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0, + const LLT Op1) { + assert((Res.isScalar() || Res.isVector()) && "invalid operand type"); + assert((Res == Op0) && "type mismatch"); +} + +MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1) { + assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() && + Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); + assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type"); + + return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); +} + +Optional<MachineInstrBuilder> +MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, + const LLT ValueTy, uint64_t Value) { + assert(Res == 0 && "Res is a result argument"); + assert(ValueTy.isScalar() && "invalid offset type"); + + if (Value == 0) { + Res = Op0; + return None; + } + + Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); + auto Cst = buildConstant(ValueTy, Value); + return buildPtrAdd(Res, Op0, Cst.getReg(0)); +} + +MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res, + const SrcOp &Op0, + uint32_t NumBits) { + LLT PtrTy = Res.getLLTTy(*getMRI()); + LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits()); + Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy); + buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits)); + return buildPtrMask(Res, Op0, MaskReg); +} + +MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) { + return buildInstr(TargetOpcode::G_BR).addMBB(&Dest); +} + +MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) { + assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination"); + return 
buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt); +} + +MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr, + unsigned JTI, + Register IndexReg) { + assert(getMRI()->getType(TablePtr).isPointer() && + "Table reg must be a pointer"); + return buildInstr(TargetOpcode::G_BRJT) + .addUse(TablePtr) + .addJumpTableIndex(JTI) + .addUse(IndexReg); +} + +MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::COPY, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + const ConstantInt &Val) { + LLT Ty = Res.getLLTTy(*getMRI()); + LLT EltTy = Ty.getScalarType(); + assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() && + "creating constant with the wrong size"); + + if (Ty.isVector()) { + auto Const = buildInstr(TargetOpcode::G_CONSTANT) + .addDef(getMRI()->createGenericVirtualRegister(EltTy)) + .addCImm(&Val); + return buildSplatVector(Res, Const); + } + + auto Const = buildInstr(TargetOpcode::G_CONSTANT); + Const->setDebugLoc(DebugLoc()); + Res.addDefToMIB(*getMRI(), Const); + Const.addCImm(&Val); + return Const; +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + int64_t Val) { + auto IntN = IntegerType::get(getMF().getFunction().getContext(), + Res.getLLTTy(*getMRI()).getScalarSizeInBits()); + ConstantInt *CI = ConstantInt::get(IntN, Val, true); + return buildConstant(Res, *CI); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + const ConstantFP &Val) { + LLT Ty = Res.getLLTTy(*getMRI()); + LLT EltTy = Ty.getScalarType(); + + assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) + == EltTy.getSizeInBits() && + "creating fconstant with the wrong size"); + + assert(!Ty.isPointer() && "invalid operand type"); + + if (Ty.isVector()) { + auto Const = buildInstr(TargetOpcode::G_FCONSTANT) + .addDef(getMRI()->createGenericVirtualRegister(EltTy)) + .addFPImm(&Val); + + return buildSplatVector(Res, Const); + } + + auto Const = buildInstr(TargetOpcode::G_FCONSTANT); + Const->setDebugLoc(DebugLoc()); + Res.addDefToMIB(*getMRI(), Const); + Const.addFPImm(&Val); + return Const; +} + +MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, + const APInt &Val) { + ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val); + return buildConstant(Res, *CI); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + double Val) { + LLT DstTy = Res.getLLTTy(*getMRI()); + auto &Ctx = getMF().getFunction().getContext(); + auto *CFP = + ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits())); + return buildFConstant(Res, *CFP); +} + +MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, + const APFloat &Val) { + auto &Ctx = getMF().getFunction().getContext(); + auto *CFP = ConstantFP::get(Ctx, Val); + return buildFConstant(Res, *CFP); +} + MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst, - MachineBasicBlock &Dest) { + MachineBasicBlock &Dest) { assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type"); - + auto MIB = buildInstr(TargetOpcode::G_BRCOND); Tst.addSrcToMIB(MIB); MIB.addMBB(&Dest); return MIB; -} - +} + MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr, MachinePointerInfo PtrInfo, Align Alignment, @@ -340,52 +340,52 @@ MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr, MachineMemOperand *MMO = getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, 
AAInfo); return buildLoad(Dst, Addr, *MMO); -} - -MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode, - const DstOp &Res, - const SrcOp &Addr, - MachineMemOperand &MMO) { - assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type"); - assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); - - auto MIB = buildInstr(Opcode); - Res.addDefToMIB(*getMRI(), MIB); - Addr.addSrcToMIB(MIB); - MIB.addMemOperand(&MMO); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset( - const DstOp &Dst, const SrcOp &BasePtr, - MachineMemOperand &BaseMMO, int64_t Offset) { - LLT LoadTy = Dst.getLLTTy(*getMRI()); - MachineMemOperand *OffsetMMO = - getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes()); - - if (Offset == 0) // This may be a size or type changing load. - return buildLoad(Dst, BasePtr, *OffsetMMO); - - LLT PtrTy = BasePtr.getLLTTy(*getMRI()); - LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits()); - auto ConstOffset = buildConstant(OffsetTy, Offset); - auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset); - return buildLoad(Dst, Ptr, *OffsetMMO); -} - -MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, - const SrcOp &Addr, - MachineMemOperand &MMO) { - assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type"); - assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); - - auto MIB = buildInstr(TargetOpcode::G_STORE); - Val.addSrcToMIB(MIB); - Addr.addSrcToMIB(MIB); - MIB.addMemOperand(&MMO); - return MIB; -} - +} + +MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode, + const DstOp &Res, + const SrcOp &Addr, + MachineMemOperand &MMO) { + assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type"); + assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + + auto MIB = buildInstr(Opcode); + Res.addDefToMIB(*getMRI(), MIB); + Addr.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset( + const DstOp &Dst, const SrcOp &BasePtr, + MachineMemOperand &BaseMMO, int64_t Offset) { + LLT LoadTy = Dst.getLLTTy(*getMRI()); + MachineMemOperand *OffsetMMO = + getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes()); + + if (Offset == 0) // This may be a size or type changing load. 
+ return buildLoad(Dst, BasePtr, *OffsetMMO); + + LLT PtrTy = BasePtr.getLLTTy(*getMRI()); + LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits()); + auto ConstOffset = buildConstant(OffsetTy, Offset); + auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset); + return buildLoad(Dst, Ptr, *OffsetMMO); +} + +MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, + const SrcOp &Addr, + MachineMemOperand &MMO) { + assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type"); + assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type"); + + auto MIB = buildInstr(TargetOpcode::G_STORE); + Val.addSrcToMIB(MIB); + Addr.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo, Align Alignment, @@ -401,240 +401,240 @@ MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr, return buildStore(Val, Addr, *MMO); } -MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res, - const SrcOp &Op) { - return buildInstr(TargetOpcode::G_ANYEXT, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res, - const SrcOp &Op) { - return buildInstr(TargetOpcode::G_SEXT, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res, - const SrcOp &Op) { - return buildInstr(TargetOpcode::G_ZEXT, Res, Op); -} - -unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const { - const auto *TLI = getMF().getSubtarget().getTargetLowering(); - switch (TLI->getBooleanContents(IsVec, IsFP)) { - case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: - return TargetOpcode::G_SEXT; - case TargetLoweringBase::ZeroOrOneBooleanContent: - return TargetOpcode::G_ZEXT; - default: - return TargetOpcode::G_ANYEXT; - } -} - -MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, - const SrcOp &Op, - bool IsFP) { - unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); - return buildInstr(ExtOp, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, - const DstOp &Res, - const SrcOp &Op) { - assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc || - TargetOpcode::G_SEXT == ExtOpc) && - "Expecting Extending Opc"); - assert(Res.getLLTTy(*getMRI()).isScalar() || - Res.getLLTTy(*getMRI()).isVector()); - assert(Res.getLLTTy(*getMRI()).isScalar() == - Op.getLLTTy(*getMRI()).isScalar()); - - unsigned Opcode = TargetOpcode::COPY; - if (Res.getLLTTy(*getMRI()).getSizeInBits() > - Op.getLLTTy(*getMRI()).getSizeInBits()) - Opcode = ExtOpc; - else if (Res.getLLTTy(*getMRI()).getSizeInBits() < - Op.getLLTTy(*getMRI()).getSizeInBits()) - Opcode = TargetOpcode::G_TRUNC; - else - assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI())); - - return buildInstr(Opcode, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res, - const SrcOp &Op) { - return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res, - const SrcOp &Op) { - return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res, - const SrcOp &Op) { - return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst, - const SrcOp &Src) { - LLT SrcTy = Src.getLLTTy(*getMRI()); - LLT DstTy = Dst.getLLTTy(*getMRI()); - if (SrcTy == DstTy) - return 
buildCopy(Dst, Src); - - unsigned Opcode; - if (SrcTy.isPointer() && DstTy.isScalar()) - Opcode = TargetOpcode::G_PTRTOINT; - else if (DstTy.isPointer() && SrcTy.isScalar()) - Opcode = TargetOpcode::G_INTTOPTR; - else { - assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet"); - Opcode = TargetOpcode::G_BITCAST; - } - - return buildInstr(Opcode, Dst, Src); -} - -MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst, - const SrcOp &Src, - uint64_t Index) { - LLT SrcTy = Src.getLLTTy(*getMRI()); - LLT DstTy = Dst.getLLTTy(*getMRI()); - -#ifndef NDEBUG - assert(SrcTy.isValid() && "invalid operand type"); - assert(DstTy.isValid() && "invalid operand type"); - assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() && - "extracting off end of register"); -#endif - - if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) { - assert(Index == 0 && "insertion past the end of a register"); - return buildCast(Dst, Src); - } - - auto Extract = buildInstr(TargetOpcode::G_EXTRACT); - Dst.addDefToMIB(*getMRI(), Extract); - Src.addSrcToMIB(Extract); - Extract.addImm(Index); - return Extract; -} - -void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops, - ArrayRef<uint64_t> Indices) { -#ifndef NDEBUG - assert(Ops.size() == Indices.size() && "incompatible args"); - assert(!Ops.empty() && "invalid trivial sequence"); - assert(llvm::is_sorted(Indices) && - "sequence offsets must be in ascending order"); - - assert(getMRI()->getType(Res).isValid() && "invalid operand type"); - for (auto Op : Ops) - assert(getMRI()->getType(Op).isValid() && "invalid operand type"); -#endif - - LLT ResTy = getMRI()->getType(Res); - LLT OpTy = getMRI()->getType(Ops[0]); - unsigned OpSize = OpTy.getSizeInBits(); - bool MaybeMerge = true; - for (unsigned i = 0; i < Ops.size(); ++i) { - if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) { - MaybeMerge = false; - break; - } - } - - if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) { - buildMerge(Res, Ops); - return; - } - - Register ResIn = getMRI()->createGenericVirtualRegister(ResTy); - buildUndef(ResIn); - - for (unsigned i = 0; i < Ops.size(); ++i) { - Register ResOut = i + 1 == Ops.size() - ? Res - : getMRI()->createGenericVirtualRegister(ResTy); - buildInsert(ResOut, ResIn, Ops[i], Indices[i]); - ResIn = ResOut; - } -} - -MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) { - return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {}); -} - -MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res, - ArrayRef<Register> Ops) { - // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. - SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); - assert(TmpVec.size() > 1); - return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec); -} - -MachineInstrBuilder -MachineIRBuilder::buildMerge(const DstOp &Res, - std::initializer_list<SrcOp> Ops) { - assert(Ops.size() > 1); - return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops); -} - -MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res, - const SrcOp &Op) { - // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. 
- SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); - assert(TmpVec.size() > 1); - return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res, - const SrcOp &Op) { - unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits(); - SmallVector<Register, 8> TmpVec; - for (unsigned I = 0; I != NumReg; ++I) - TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res)); - return buildUnmerge(TmpVec, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res, - const SrcOp &Op) { - // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. - SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); - assert(TmpVec.size() > 1); - return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res, - ArrayRef<Register> Ops) { - // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. - SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); - return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); -} - -MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, - const SrcOp &Src) { - SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src); - return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); -} - -MachineInstrBuilder -MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res, - ArrayRef<Register> Ops) { - // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. 
- SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); - return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec); -} - +MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_ANYEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_SEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_ZEXT, Res, Op); +} + +unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const { + const auto *TLI = getMF().getSubtarget().getTargetLowering(); + switch (TLI->getBooleanContents(IsVec, IsFP)) { + case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: + return TargetOpcode::G_SEXT; + case TargetLoweringBase::ZeroOrOneBooleanContent: + return TargetOpcode::G_ZEXT; + default: + return TargetOpcode::G_ANYEXT; + } +} + +MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, + const SrcOp &Op, + bool IsFP) { + unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); + return buildInstr(ExtOp, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, + const DstOp &Res, + const SrcOp &Op) { + assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc || + TargetOpcode::G_SEXT == ExtOpc) && + "Expecting Extending Opc"); + assert(Res.getLLTTy(*getMRI()).isScalar() || + Res.getLLTTy(*getMRI()).isVector()); + assert(Res.getLLTTy(*getMRI()).isScalar() == + Op.getLLTTy(*getMRI()).isScalar()); + + unsigned Opcode = TargetOpcode::COPY; + if (Res.getLLTTy(*getMRI()).getSizeInBits() > + Op.getLLTTy(*getMRI()).getSizeInBits()) + Opcode = ExtOpc; + else if (Res.getLLTTy(*getMRI()).getSizeInBits() < + Op.getLLTTy(*getMRI()).getSizeInBits()) + Opcode = TargetOpcode::G_TRUNC; + else + assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI())); + + return buildInstr(Opcode, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst, + const SrcOp &Src) { + LLT SrcTy = Src.getLLTTy(*getMRI()); + LLT DstTy = Dst.getLLTTy(*getMRI()); + if (SrcTy == DstTy) + return buildCopy(Dst, Src); + + unsigned Opcode; + if (SrcTy.isPointer() && DstTy.isScalar()) + Opcode = TargetOpcode::G_PTRTOINT; + else if (DstTy.isPointer() && SrcTy.isScalar()) + Opcode = TargetOpcode::G_INTTOPTR; + else { + assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet"); + Opcode = TargetOpcode::G_BITCAST; + } + + return buildInstr(Opcode, Dst, Src); +} + +MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst, + const SrcOp &Src, + uint64_t Index) { + LLT SrcTy = Src.getLLTTy(*getMRI()); + LLT DstTy = Dst.getLLTTy(*getMRI()); + +#ifndef NDEBUG + assert(SrcTy.isValid() && "invalid operand type"); + assert(DstTy.isValid() && "invalid operand type"); + assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() && + "extracting off end of register"); +#endif + + if (DstTy.getSizeInBits() == 
SrcTy.getSizeInBits()) { + assert(Index == 0 && "insertion past the end of a register"); + return buildCast(Dst, Src); + } + + auto Extract = buildInstr(TargetOpcode::G_EXTRACT); + Dst.addDefToMIB(*getMRI(), Extract); + Src.addSrcToMIB(Extract); + Extract.addImm(Index); + return Extract; +} + +void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops, + ArrayRef<uint64_t> Indices) { +#ifndef NDEBUG + assert(Ops.size() == Indices.size() && "incompatible args"); + assert(!Ops.empty() && "invalid trivial sequence"); + assert(llvm::is_sorted(Indices) && + "sequence offsets must be in ascending order"); + + assert(getMRI()->getType(Res).isValid() && "invalid operand type"); + for (auto Op : Ops) + assert(getMRI()->getType(Op).isValid() && "invalid operand type"); +#endif + + LLT ResTy = getMRI()->getType(Res); + LLT OpTy = getMRI()->getType(Ops[0]); + unsigned OpSize = OpTy.getSizeInBits(); + bool MaybeMerge = true; + for (unsigned i = 0; i < Ops.size(); ++i) { + if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) { + MaybeMerge = false; + break; + } + } + + if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) { + buildMerge(Res, Ops); + return; + } + + Register ResIn = getMRI()->createGenericVirtualRegister(ResTy); + buildUndef(ResIn); + + for (unsigned i = 0; i < Ops.size(); ++i) { + Register ResOut = i + 1 == Ops.size() + ? Res + : getMRI()->createGenericVirtualRegister(ResTy); + buildInsert(ResOut, ResIn, Ops[i], Indices[i]); + ResIn = ResOut; + } +} + +MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) { + return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {}); +} + +MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res, + ArrayRef<Register> Ops) { + // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. + SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); + assert(TmpVec.size() > 1); + return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec); +} + +MachineInstrBuilder +MachineIRBuilder::buildMerge(const DstOp &Res, + std::initializer_list<SrcOp> Ops) { + assert(Ops.size() > 1); + return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops); +} + +MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res, + const SrcOp &Op) { + // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. + SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); + assert(TmpVec.size() > 1); + return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res, + const SrcOp &Op) { + unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits(); + SmallVector<Register, 8> TmpVec; + for (unsigned I = 0; I != NumReg; ++I) + TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res)); + return buildUnmerge(TmpVec, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res, + const SrcOp &Op) { + // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. 
+ SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end()); + assert(TmpVec.size() > 1); + return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res, + ArrayRef<Register> Ops) { + // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. + SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); + return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); +} + +MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, + const SrcOp &Src) { + SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src); + return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); +} + +MachineInstrBuilder +MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res, + ArrayRef<Register> Ops) { + // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. + SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); + return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec); +} + MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res, const SrcOp &Src) { LLT DstTy = Res.getLLTTy(*getMRI()); @@ -664,328 +664,328 @@ MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res, .addShuffleMask(MaskAlloc); } -MachineInstrBuilder -MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) { - // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, - // we need some temporary storage for the DstOp objects. Here we use a - // sufficiently large SmallVector to not go through the heap. - SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); - return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec); -} - -MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res, - const SrcOp &Src, - const SrcOp &Op, - unsigned Index) { - assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <= - Res.getLLTTy(*getMRI()).getSizeInBits() && - "insertion past the end of a register"); - - if (Res.getLLTTy(*getMRI()).getSizeInBits() == - Op.getLLTTy(*getMRI()).getSizeInBits()) { - return buildCast(Res, Op); - } - - return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)}); -} - -MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, - ArrayRef<Register> ResultRegs, - bool HasSideEffects) { - auto MIB = - buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS - : TargetOpcode::G_INTRINSIC); - for (unsigned ResultReg : ResultRegs) - MIB.addDef(ResultReg); - MIB.addIntrinsicID(ID); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, - ArrayRef<DstOp> Results, - bool HasSideEffects) { - auto MIB = - buildInstr(HasSideEffects ? 
TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS - : TargetOpcode::G_INTRINSIC); - for (DstOp Result : Results) - Result.addDefToMIB(*getMRI(), MIB); - MIB.addIntrinsicID(ID); - return MIB; -} - -MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, - const SrcOp &Op) { - return buildInstr(TargetOpcode::G_TRUNC, Res, Op); -} - -MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, - const SrcOp &Op, - Optional<unsigned> Flags) { - return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags); -} - -MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred, - const DstOp &Res, - const SrcOp &Op0, - const SrcOp &Op1) { - return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}); -} - -MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, - const DstOp &Res, - const SrcOp &Op0, - const SrcOp &Op1, - Optional<unsigned> Flags) { - - return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); -} - -MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, - const SrcOp &Tst, - const SrcOp &Op0, - const SrcOp &Op1, - Optional<unsigned> Flags) { - - return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); -} - -MachineInstrBuilder -MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, - const SrcOp &Elt, const SrcOp &Idx) { - return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx}); -} - -MachineInstrBuilder -MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, - const SrcOp &Idx) { - return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx}); -} - -MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess( - Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, - Register NewVal, MachineMemOperand &MMO) { -#ifndef NDEBUG - LLT OldValResTy = getMRI()->getType(OldValRes); - LLT SuccessResTy = getMRI()->getType(SuccessRes); - LLT AddrTy = getMRI()->getType(Addr); - LLT CmpValTy = getMRI()->getType(CmpVal); - LLT NewValTy = getMRI()->getType(NewVal); - assert(OldValResTy.isScalar() && "invalid operand type"); - assert(SuccessResTy.isScalar() && "invalid operand type"); - assert(AddrTy.isPointer() && "invalid operand type"); - assert(CmpValTy.isValid() && "invalid operand type"); - assert(NewValTy.isValid() && "invalid operand type"); - assert(OldValResTy == CmpValTy && "type mismatch"); - assert(OldValResTy == NewValTy && "type mismatch"); -#endif - - return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) - .addDef(OldValRes) - .addDef(SuccessRes) - .addUse(Addr) - .addUse(CmpVal) - .addUse(NewVal) - .addMemOperand(&MMO); -} - -MachineInstrBuilder -MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, - Register CmpVal, Register NewVal, - MachineMemOperand &MMO) { -#ifndef NDEBUG - LLT OldValResTy = getMRI()->getType(OldValRes); - LLT AddrTy = getMRI()->getType(Addr); - LLT CmpValTy = getMRI()->getType(CmpVal); - LLT NewValTy = getMRI()->getType(NewVal); - assert(OldValResTy.isScalar() && "invalid operand type"); - assert(AddrTy.isPointer() && "invalid operand type"); - assert(CmpValTy.isValid() && "invalid operand type"); - assert(NewValTy.isValid() && "invalid operand type"); - assert(OldValResTy == CmpValTy && "type mismatch"); - assert(OldValResTy == NewValTy && "type mismatch"); -#endif - - return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) - .addDef(OldValRes) - .addUse(Addr) - .addUse(CmpVal) - .addUse(NewVal) - .addMemOperand(&MMO); -} - -MachineInstrBuilder 
MachineIRBuilder::buildAtomicRMW( - unsigned Opcode, const DstOp &OldValRes, - const SrcOp &Addr, const SrcOp &Val, - MachineMemOperand &MMO) { - -#ifndef NDEBUG - LLT OldValResTy = OldValRes.getLLTTy(*getMRI()); - LLT AddrTy = Addr.getLLTTy(*getMRI()); - LLT ValTy = Val.getLLTTy(*getMRI()); - assert(OldValResTy.isScalar() && "invalid operand type"); - assert(AddrTy.isPointer() && "invalid operand type"); - assert(ValTy.isValid() && "invalid operand type"); - assert(OldValResTy == ValTy && "type mismatch"); - assert(MMO.isAtomic() && "not atomic mem operand"); -#endif - - auto MIB = buildInstr(Opcode); - OldValRes.addDefToMIB(*getMRI(), MIB); - Addr.addSrcToMIB(MIB); - Val.addSrcToMIB(MIB); - MIB.addMemOperand(&MMO); - return MIB; -} - -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, - Register Addr, - Register Val, - MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val, - MMO); -} -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr, - Register Val, MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val, - MMO); -} - -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWFAdd( - const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, - MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val, - MMO); -} - -MachineInstrBuilder -MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, 
const SrcOp &Val, - MachineMemOperand &MMO) { - return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val, - MMO); -} - -MachineInstrBuilder -MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) { - return buildInstr(TargetOpcode::G_FENCE) - .addImm(Ordering) - .addImm(Scope); -} - -MachineInstrBuilder -MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) { -#ifndef NDEBUG - assert(getMRI()->getType(Res).isPointer() && "invalid res type"); -#endif - - return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA); -} - -void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy, - bool IsExtend) { -#ifndef NDEBUG - if (DstTy.isVector()) { - assert(SrcTy.isVector() && "mismatched cast between vector and non-vector"); - assert(SrcTy.getNumElements() == DstTy.getNumElements() && - "different number of elements in a trunc/ext"); - } else - assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc"); - - if (IsExtend) - assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() && - "invalid narrowing extend"); - else - assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() && - "invalid widening trunc"); -#endif -} - -void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy, - const LLT Op0Ty, const LLT Op1Ty) { -#ifndef NDEBUG - assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) && - "invalid operand type"); - assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch"); - if (ResTy.isScalar() || ResTy.isPointer()) - assert(TstTy.isScalar() && "type mismatch"); - else - assert((TstTy.isScalar() || - (TstTy.isVector() && - TstTy.getNumElements() == Op0Ty.getNumElements())) && - "type mismatch"); -#endif -} - -MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, - ArrayRef<DstOp> DstOps, - ArrayRef<SrcOp> SrcOps, - Optional<unsigned> Flags) { - switch (Opc) { - default: - break; - case TargetOpcode::G_SELECT: { - assert(DstOps.size() == 1 && "Invalid select"); - assert(SrcOps.size() == 3 && "Invalid select"); - validateSelectOp( - DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()), - SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI())); - break; - } +MachineInstrBuilder +MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) { + // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>, + // we need some temporary storage for the DstOp objects. Here we use a + // sufficiently large SmallVector to not go through the heap. + SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end()); + return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec); +} + +MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res, + const SrcOp &Src, + const SrcOp &Op, + unsigned Index) { + assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <= + Res.getLLTTy(*getMRI()).getSizeInBits() && + "insertion past the end of a register"); + + if (Res.getLLTTy(*getMRI()).getSizeInBits() == + Op.getLLTTy(*getMRI()).getSizeInBits()) { + return buildCast(Res, Op); + } + + return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)}); +} + +MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, + ArrayRef<Register> ResultRegs, + bool HasSideEffects) { + auto MIB = + buildInstr(HasSideEffects ? 
TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS + : TargetOpcode::G_INTRINSIC); + for (unsigned ResultReg : ResultRegs) + MIB.addDef(ResultReg); + MIB.addIntrinsicID(ID); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, + ArrayRef<DstOp> Results, + bool HasSideEffects) { + auto MIB = + buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS + : TargetOpcode::G_INTRINSIC); + for (DstOp Result : Results) + Result.addDefToMIB(*getMRI(), MIB); + MIB.addIntrinsicID(ID); + return MIB; +} + +MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res, + const SrcOp &Op) { + return buildInstr(TargetOpcode::G_TRUNC, Res, Op); +} + +MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res, + const SrcOp &Op, + Optional<unsigned> Flags) { + return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags); +} + +MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred, + const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1) { + return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}); +} + +MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred, + const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1, + Optional<unsigned> Flags) { + + return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags); +} + +MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res, + const SrcOp &Tst, + const SrcOp &Op0, + const SrcOp &Op1, + Optional<unsigned> Flags) { + + return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags); +} + +MachineInstrBuilder +MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, + const SrcOp &Elt, const SrcOp &Idx) { + return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx}); +} + +MachineInstrBuilder +MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, + const SrcOp &Idx) { + return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx}); +} + +MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess( + Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, + Register NewVal, MachineMemOperand &MMO) { +#ifndef NDEBUG + LLT OldValResTy = getMRI()->getType(OldValRes); + LLT SuccessResTy = getMRI()->getType(SuccessRes); + LLT AddrTy = getMRI()->getType(Addr); + LLT CmpValTy = getMRI()->getType(CmpVal); + LLT NewValTy = getMRI()->getType(NewVal); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(SuccessResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(CmpValTy.isValid() && "invalid operand type"); + assert(NewValTy.isValid() && "invalid operand type"); + assert(OldValResTy == CmpValTy && "type mismatch"); + assert(OldValResTy == NewValTy && "type mismatch"); +#endif + + return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS) + .addDef(OldValRes) + .addDef(SuccessRes) + .addUse(Addr) + .addUse(CmpVal) + .addUse(NewVal) + .addMemOperand(&MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr, + Register CmpVal, Register NewVal, + MachineMemOperand &MMO) { +#ifndef NDEBUG + LLT OldValResTy = getMRI()->getType(OldValRes); + LLT AddrTy = getMRI()->getType(Addr); + LLT CmpValTy = getMRI()->getType(CmpVal); + LLT NewValTy = getMRI()->getType(NewVal); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(CmpValTy.isValid() && "invalid operand type"); + 
assert(NewValTy.isValid() && "invalid operand type"); + assert(OldValResTy == CmpValTy && "type mismatch"); + assert(OldValResTy == NewValTy && "type mismatch"); +#endif + + return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG) + .addDef(OldValRes) + .addUse(Addr) + .addUse(CmpVal) + .addUse(NewVal) + .addMemOperand(&MMO); +} + +MachineInstrBuilder MachineIRBuilder::buildAtomicRMW( + unsigned Opcode, const DstOp &OldValRes, + const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { + +#ifndef NDEBUG + LLT OldValResTy = OldValRes.getLLTTy(*getMRI()); + LLT AddrTy = Addr.getLLTTy(*getMRI()); + LLT ValTy = Val.getLLTTy(*getMRI()); + assert(OldValResTy.isScalar() && "invalid operand type"); + assert(AddrTy.isPointer() && "invalid operand type"); + assert(ValTy.isValid() && "invalid operand type"); + assert(OldValResTy == ValTy && "type mismatch"); + assert(MMO.isAtomic() && "not atomic mem operand"); +#endif + + auto MIB = buildInstr(Opcode); + OldValRes.addDefToMIB(*getMRI(), MIB); + Addr.addSrcToMIB(MIB); + Val.addSrcToMIB(MIB); + MIB.addMemOperand(&MMO); + return MIB; +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, + Register Addr, + Register Val, + MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val, + MMO); +} +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr, + Register Val, MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val, + MMO); +} 
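// Editor's note: an illustrative sketch, not part of the diffed source. Each
// buildAtomicRMW* wrapper above forwards to buildAtomicRMW with the matching
// generic opcode; given a MachineIRBuilder B (hypothetical name) and an atomic
// MachineMemOperand MMO, the call
//   auto MIB = B.buildAtomicRMWAdd(OldValRes, Addr, Val, MMO);
// behaves exactly like
//   auto MIB = B.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD,
//                               OldValRes, Addr, Val, MMO);
// where the NDEBUG-guarded asserts require a scalar result, a pointer address,
// a value of the same type as the result, and an atomic memory operand.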
+ +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWFAdd( + const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val, + MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, + MachineMemOperand &MMO) { + return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val, + MMO); +} + +MachineInstrBuilder +MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) { + return buildInstr(TargetOpcode::G_FENCE) + .addImm(Ordering) + .addImm(Scope); +} + +MachineInstrBuilder +MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) { +#ifndef NDEBUG + assert(getMRI()->getType(Res).isPointer() && "invalid res type"); +#endif + + return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA); +} + +void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy, + bool IsExtend) { +#ifndef NDEBUG + if (DstTy.isVector()) { + assert(SrcTy.isVector() && "mismatched cast between vector and non-vector"); + assert(SrcTy.getNumElements() == DstTy.getNumElements() && + "different number of elements in a trunc/ext"); + } else + assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc"); + + if (IsExtend) + assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() && + "invalid narrowing extend"); + else + assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() && + "invalid widening trunc"); +#endif +} + +void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy, + const LLT Op0Ty, const LLT Op1Ty) { +#ifndef NDEBUG + assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) && + "invalid operand type"); + assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch"); + if (ResTy.isScalar() || ResTy.isPointer()) + assert(TstTy.isScalar() && "type mismatch"); + else + assert((TstTy.isScalar() || + (TstTy.isVector() && + TstTy.getNumElements() == Op0Ty.getNumElements())) && + "type mismatch"); +#endif +} + +MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, + ArrayRef<DstOp> DstOps, + ArrayRef<SrcOp> SrcOps, + Optional<unsigned> Flags) { + switch (Opc) { + default: + break; + case TargetOpcode::G_SELECT: { + assert(DstOps.size() == 1 && "Invalid select"); + assert(SrcOps.size() == 3 && "Invalid select"); + validateSelectOp( + DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI())); + break; + } case TargetOpcode::G_FNEG: case TargetOpcode::G_ABS: // All these are unary ops. @@ -994,230 +994,230 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc, validateUnaryOp(DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI())); break; - case TargetOpcode::G_ADD: - case TargetOpcode::G_AND: - case TargetOpcode::G_MUL: - case TargetOpcode::G_OR: - case TargetOpcode::G_SUB: - case TargetOpcode::G_XOR: - case TargetOpcode::G_UDIV: - case TargetOpcode::G_SDIV: - case TargetOpcode::G_UREM: - case TargetOpcode::G_SREM: - case TargetOpcode::G_SMIN: - case TargetOpcode::G_SMAX: - case TargetOpcode::G_UMIN: - case TargetOpcode::G_UMAX: - case TargetOpcode::G_UADDSAT: - case TargetOpcode::G_SADDSAT: - case TargetOpcode::G_USUBSAT: - case TargetOpcode::G_SSUBSAT: { - // All these are binary ops. 
- assert(DstOps.size() == 1 && "Invalid Dst"); - assert(SrcOps.size() == 2 && "Invalid Srcs"); - validateBinaryOp(DstOps[0].getLLTTy(*getMRI()), - SrcOps[0].getLLTTy(*getMRI()), - SrcOps[1].getLLTTy(*getMRI())); - break; - } - case TargetOpcode::G_SHL: - case TargetOpcode::G_ASHR: + case TargetOpcode::G_ADD: + case TargetOpcode::G_AND: + case TargetOpcode::G_MUL: + case TargetOpcode::G_OR: + case TargetOpcode::G_SUB: + case TargetOpcode::G_XOR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_UREM: + case TargetOpcode::G_SREM: + case TargetOpcode::G_SMIN: + case TargetOpcode::G_SMAX: + case TargetOpcode::G_UMIN: + case TargetOpcode::G_UMAX: + case TargetOpcode::G_UADDSAT: + case TargetOpcode::G_SADDSAT: + case TargetOpcode::G_USUBSAT: + case TargetOpcode::G_SSUBSAT: { + // All these are binary ops. + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 2 && "Invalid Srcs"); + validateBinaryOp(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI())); + break; + } + case TargetOpcode::G_SHL: + case TargetOpcode::G_ASHR: case TargetOpcode::G_LSHR: case TargetOpcode::G_USHLSAT: case TargetOpcode::G_SSHLSAT: { - assert(DstOps.size() == 1 && "Invalid Dst"); - assert(SrcOps.size() == 2 && "Invalid Srcs"); - validateShiftOp(DstOps[0].getLLTTy(*getMRI()), - SrcOps[0].getLLTTy(*getMRI()), - SrcOps[1].getLLTTy(*getMRI())); - break; - } - case TargetOpcode::G_SEXT: - case TargetOpcode::G_ZEXT: - case TargetOpcode::G_ANYEXT: - assert(DstOps.size() == 1 && "Invalid Dst"); - assert(SrcOps.size() == 1 && "Invalid Srcs"); - validateTruncExt(DstOps[0].getLLTTy(*getMRI()), - SrcOps[0].getLLTTy(*getMRI()), true); - break; - case TargetOpcode::G_TRUNC: - case TargetOpcode::G_FPTRUNC: { - assert(DstOps.size() == 1 && "Invalid Dst"); - assert(SrcOps.size() == 1 && "Invalid Srcs"); - validateTruncExt(DstOps[0].getLLTTy(*getMRI()), - SrcOps[0].getLLTTy(*getMRI()), false); - break; - } - case TargetOpcode::G_BITCAST: { - assert(DstOps.size() == 1 && "Invalid Dst"); - assert(SrcOps.size() == 1 && "Invalid Srcs"); - assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == - SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast"); - break; - } - case TargetOpcode::COPY: - assert(DstOps.size() == 1 && "Invalid Dst"); - // If the caller wants to add a subreg source it has to be done separately - // so we may not have any SrcOps at this point yet. - break; - case TargetOpcode::G_FCMP: - case TargetOpcode::G_ICMP: { - assert(DstOps.size() == 1 && "Invalid Dst Operands"); - assert(SrcOps.size() == 3 && "Invalid Src Operands"); - // For F/ICMP, the first src operand is the predicate, followed by - // the two comparands. - assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate && - "Expecting predicate"); - assert([&]() -> bool { - CmpInst::Predicate Pred = SrcOps[0].getPredicate(); - return Opc == TargetOpcode::G_ICMP ? 
CmpInst::isIntPredicate(Pred) - : CmpInst::isFPPredicate(Pred); - }() && "Invalid predicate"); - assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && - "Type mismatch"); - assert([&]() -> bool { - LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI()); - LLT DstTy = DstOps[0].getLLTTy(*getMRI()); - if (Op0Ty.isScalar() || Op0Ty.isPointer()) - return DstTy.isScalar(); - else - return DstTy.isVector() && - DstTy.getNumElements() == Op0Ty.getNumElements(); - }() && "Type Mismatch"); - break; - } - case TargetOpcode::G_UNMERGE_VALUES: { - assert(!DstOps.empty() && "Invalid trivial sequence"); - assert(SrcOps.size() == 1 && "Invalid src for Unmerge"); + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 2 && "Invalid Srcs"); + validateShiftOp(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), + SrcOps[1].getLLTTy(*getMRI())); + break; + } + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ZEXT: + case TargetOpcode::G_ANYEXT: + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 1 && "Invalid Srcs"); + validateTruncExt(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), true); + break; + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_FPTRUNC: { + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 1 && "Invalid Srcs"); + validateTruncExt(DstOps[0].getLLTTy(*getMRI()), + SrcOps[0].getLLTTy(*getMRI()), false); + break; + } + case TargetOpcode::G_BITCAST: { + assert(DstOps.size() == 1 && "Invalid Dst"); + assert(SrcOps.size() == 1 && "Invalid Srcs"); + assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == + SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast"); + break; + } + case TargetOpcode::COPY: + assert(DstOps.size() == 1 && "Invalid Dst"); + // If the caller wants to add a subreg source it has to be done separately + // so we may not have any SrcOps at this point yet. + break; + case TargetOpcode::G_FCMP: + case TargetOpcode::G_ICMP: { + assert(DstOps.size() == 1 && "Invalid Dst Operands"); + assert(SrcOps.size() == 3 && "Invalid Src Operands"); + // For F/ICMP, the first src operand is the predicate, followed by + // the two comparands. + assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate && + "Expecting predicate"); + assert([&]() -> bool { + CmpInst::Predicate Pred = SrcOps[0].getPredicate(); + return Opc == TargetOpcode::G_ICMP ? 
CmpInst::isIntPredicate(Pred) + : CmpInst::isFPPredicate(Pred); + }() && "Invalid predicate"); + assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && + "Type mismatch"); + assert([&]() -> bool { + LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI()); + LLT DstTy = DstOps[0].getLLTTy(*getMRI()); + if (Op0Ty.isScalar() || Op0Ty.isPointer()) + return DstTy.isScalar(); + else + return DstTy.isVector() && + DstTy.getNumElements() == Op0Ty.getNumElements(); + }() && "Type Mismatch"); + break; + } + case TargetOpcode::G_UNMERGE_VALUES: { + assert(!DstOps.empty() && "Invalid trivial sequence"); + assert(SrcOps.size() == 1 && "Invalid src for Unmerge"); assert(llvm::all_of(DstOps, [&, this](const DstOp &Op) { return Op.getLLTTy(*getMRI()) == DstOps[0].getLLTTy(*getMRI()); }) && - "type mismatch in output list"); - assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == - SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && - "input operands do not cover output register"); - break; - } - case TargetOpcode::G_MERGE_VALUES: { - assert(!SrcOps.empty() && "invalid trivial sequence"); - assert(DstOps.size() == 1 && "Invalid Dst"); + "type mismatch in output list"); + assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() == + SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input operands do not cover output register"); + break; + } + case TargetOpcode::G_MERGE_VALUES: { + assert(!SrcOps.empty() && "invalid trivial sequence"); + assert(DstOps.size() == 1 && "Invalid Dst"); assert(llvm::all_of(SrcOps, [&, this](const SrcOp &Op) { return Op.getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()); }) && - "type mismatch in input list"); - assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == - DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && - "input operands do not cover output register"); - if (SrcOps.size() == 1) - return buildCast(DstOps[0], SrcOps[0]); - if (DstOps[0].getLLTTy(*getMRI()).isVector()) { - if (SrcOps[0].getLLTTy(*getMRI()).isVector()) - return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps); - return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); - } - break; - } - case TargetOpcode::G_EXTRACT_VECTOR_ELT: { - assert(DstOps.size() == 1 && "Invalid Dst size"); - assert(SrcOps.size() == 2 && "Invalid Src size"); - assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); - assert((DstOps[0].getLLTTy(*getMRI()).isScalar() || - DstOps[0].getLLTTy(*getMRI()).isPointer()) && - "Invalid operand type"); - assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type"); - assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() == - DstOps[0].getLLTTy(*getMRI()) && - "Type mismatch"); - break; - } - case TargetOpcode::G_INSERT_VECTOR_ELT: { - assert(DstOps.size() == 1 && "Invalid dst size"); - assert(SrcOps.size() == 3 && "Invalid src size"); - assert(DstOps[0].getLLTTy(*getMRI()).isVector() && - SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); - assert(DstOps[0].getLLTTy(*getMRI()).getElementType() == - SrcOps[1].getLLTTy(*getMRI()) && - "Type mismatch"); - assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index"); - assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() == - SrcOps[0].getLLTTy(*getMRI()).getNumElements() && - "Type mismatch"); - break; - } - case TargetOpcode::G_BUILD_VECTOR: { - assert((!SrcOps.empty() || SrcOps.size() < 2) && - "Must have at least 2 operands"); - assert(DstOps.size() == 1 && "Invalid DstOps"); - 
assert(DstOps[0].getLLTTy(*getMRI()).isVector() && - "Res type must be a vector"); + "type mismatch in input list"); + assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == + DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input operands do not cover output register"); + if (SrcOps.size() == 1) + return buildCast(DstOps[0], SrcOps[0]); + if (DstOps[0].getLLTTy(*getMRI()).isVector()) { + if (SrcOps[0].getLLTTy(*getMRI()).isVector()) + return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps); + return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); + } + break; + } + case TargetOpcode::G_EXTRACT_VECTOR_ELT: { + assert(DstOps.size() == 1 && "Invalid Dst size"); + assert(SrcOps.size() == 2 && "Invalid Src size"); + assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); + assert((DstOps[0].getLLTTy(*getMRI()).isScalar() || + DstOps[0].getLLTTy(*getMRI()).isPointer()) && + "Invalid operand type"); + assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type"); + assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() == + DstOps[0].getLLTTy(*getMRI()) && + "Type mismatch"); + break; + } + case TargetOpcode::G_INSERT_VECTOR_ELT: { + assert(DstOps.size() == 1 && "Invalid dst size"); + assert(SrcOps.size() == 3 && "Invalid src size"); + assert(DstOps[0].getLLTTy(*getMRI()).isVector() && + SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type"); + assert(DstOps[0].getLLTTy(*getMRI()).getElementType() == + SrcOps[1].getLLTTy(*getMRI()) && + "Type mismatch"); + assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index"); + assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() == + SrcOps[0].getLLTTy(*getMRI()).getNumElements() && + "Type mismatch"); + break; + } + case TargetOpcode::G_BUILD_VECTOR: { + assert((!SrcOps.empty() || SrcOps.size() < 2) && + "Must have at least 2 operands"); + assert(DstOps.size() == 1 && "Invalid DstOps"); + assert(DstOps[0].getLLTTy(*getMRI()).isVector() && + "Res type must be a vector"); assert(llvm::all_of(SrcOps, [&, this](const SrcOp &Op) { return Op.getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()); }) && - "type mismatch in input list"); - assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == - DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && - "input scalars do not exactly cover the output vector register"); - break; - } - case TargetOpcode::G_BUILD_VECTOR_TRUNC: { - assert((!SrcOps.empty() || SrcOps.size() < 2) && - "Must have at least 2 operands"); - assert(DstOps.size() == 1 && "Invalid DstOps"); - assert(DstOps[0].getLLTTy(*getMRI()).isVector() && - "Res type must be a vector"); + "type mismatch in input list"); + assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == + DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input scalars do not exactly cover the output vector register"); + break; + } + case TargetOpcode::G_BUILD_VECTOR_TRUNC: { + assert((!SrcOps.empty() || SrcOps.size() < 2) && + "Must have at least 2 operands"); + assert(DstOps.size() == 1 && "Invalid DstOps"); + assert(DstOps[0].getLLTTy(*getMRI()).isVector() && + "Res type must be a vector"); assert(llvm::all_of(SrcOps, [&, this](const SrcOp &Op) { return Op.getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()); }) && - "type mismatch in input list"); - if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == - DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits()) - return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); - break; - } - case 
TargetOpcode::G_CONCAT_VECTORS: { - assert(DstOps.size() == 1 && "Invalid DstOps"); - assert((!SrcOps.empty() || SrcOps.size() < 2) && - "Must have at least 2 operands"); + "type mismatch in input list"); + if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == + DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits()) + return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps); + break; + } + case TargetOpcode::G_CONCAT_VECTORS: { + assert(DstOps.size() == 1 && "Invalid DstOps"); + assert((!SrcOps.empty() || SrcOps.size() < 2) && + "Must have at least 2 operands"); assert(llvm::all_of(SrcOps, [&, this](const SrcOp &Op) { return (Op.getLLTTy(*getMRI()).isVector() && Op.getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())); }) && - "type mismatch in input list"); - assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == - DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && - "input vectors do not exactly cover the output vector register"); - break; - } - case TargetOpcode::G_UADDE: { - assert(DstOps.size() == 2 && "Invalid no of dst operands"); - assert(SrcOps.size() == 3 && "Invalid no of src operands"); - assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); - assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) && - (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) && - "Invalid operand"); - assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); - assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && - "type mismatch"); - break; - } - } - - auto MIB = buildInstr(Opc); - for (const DstOp &Op : DstOps) - Op.addDefToMIB(*getMRI(), MIB); - for (const SrcOp &Op : SrcOps) - Op.addSrcToMIB(MIB); - if (Flags) - MIB->setFlags(*Flags); - return MIB; -} + "type mismatch in input list"); + assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() == + DstOps[0].getLLTTy(*getMRI()).getSizeInBits() && + "input vectors do not exactly cover the output vector register"); + break; + } + case TargetOpcode::G_UADDE: { + assert(DstOps.size() == 2 && "Invalid no of dst operands"); + assert(SrcOps.size() == 3 && "Invalid no of src operands"); + assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); + assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) && + (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) && + "Invalid operand"); + assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand"); + assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) && + "type mismatch"); + break; + } + } + + auto MIB = buildInstr(Opc); + for (const DstOp &Op : DstOps) + Op.addDefToMIB(*getMRI(), MIB); + for (const SrcOp &Op : SrcOps) + Op.addSrcToMIB(MIB); + if (Flags) + MIB->setFlags(*Flags); + return MIB; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegBankSelect.cpp index 362ff084d9..356e0e437d 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -1,1083 +1,1083 @@ -//==- llvm/CodeGen/GlobalISel/RegBankSelect.cpp - RegBankSelect --*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the RegBankSelect class. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/RegBankSelect.h" -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" -#include "llvm/CodeGen/GlobalISel/RegisterBank.h" -#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineBlockFrequencyInfo.h" -#include "llvm/CodeGen/MachineBranchProbabilityInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetOpcodes.h" -#include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/CodeGen/TargetRegisterInfo.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/Config/llvm-config.h" -#include "llvm/IR/Attributes.h" -#include "llvm/IR/Function.h" -#include "llvm/InitializePasses.h" -#include "llvm/Pass.h" -#include "llvm/Support/BlockFrequency.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Compiler.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include <algorithm> -#include <cassert> -#include <cstdint> -#include <limits> -#include <memory> -#include <utility> - -#define DEBUG_TYPE "regbankselect" - -using namespace llvm; - -static cl::opt<RegBankSelect::Mode> RegBankSelectMode( - cl::desc("Mode of the RegBankSelect pass"), cl::Hidden, cl::Optional, - cl::values(clEnumValN(RegBankSelect::Mode::Fast, "regbankselect-fast", - "Run the Fast mode (default mapping)"), - clEnumValN(RegBankSelect::Mode::Greedy, "regbankselect-greedy", - "Use the Greedy mode (best local mapping)"))); - -char RegBankSelect::ID = 0; - -INITIALIZE_PASS_BEGIN(RegBankSelect, DEBUG_TYPE, - "Assign register bank of generic virtual registers", - false, false); -INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo) -INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo) -INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) -INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, - "Assign register bank of generic virtual registers", false, - false) - -RegBankSelect::RegBankSelect(Mode RunningMode) - : MachineFunctionPass(ID), OptMode(RunningMode) { - if (RegBankSelectMode.getNumOccurrences() != 0) { - OptMode = RegBankSelectMode; - if (RegBankSelectMode != RunningMode) - LLVM_DEBUG(dbgs() << "RegBankSelect mode overrided by command line\n"); - } -} - -void RegBankSelect::init(MachineFunction &MF) { - RBI = MF.getSubtarget().getRegBankInfo(); - assert(RBI && "Cannot work without RegisterBankInfo"); - MRI = &MF.getRegInfo(); - TRI = MF.getSubtarget().getRegisterInfo(); - TPC = &getAnalysis<TargetPassConfig>(); - if (OptMode != Mode::Fast) { - MBFI = &getAnalysis<MachineBlockFrequencyInfo>(); - MBPI = &getAnalysis<MachineBranchProbabilityInfo>(); - } else { - MBFI = nullptr; - MBPI = nullptr; - } - MIRBuilder.setMF(MF); - MORE = std::make_unique<MachineOptimizationRemarkEmitter>(MF, MBFI); -} - -void RegBankSelect::getAnalysisUsage(AnalysisUsage &AU) const { - if (OptMode != Mode::Fast) { - // We 
could preserve the information from these two analysis but - // the APIs do not allow to do so yet. - AU.addRequired<MachineBlockFrequencyInfo>(); - AU.addRequired<MachineBranchProbabilityInfo>(); - } - AU.addRequired<TargetPassConfig>(); - getSelectionDAGFallbackAnalysisUsage(AU); - MachineFunctionPass::getAnalysisUsage(AU); -} - -bool RegBankSelect::assignmentMatch( - Register Reg, const RegisterBankInfo::ValueMapping &ValMapping, - bool &OnlyAssign) const { - // By default we assume we will have to repair something. - OnlyAssign = false; - // Each part of a break down needs to end up in a different register. - // In other word, Reg assignment does not match. - if (ValMapping.NumBreakDowns != 1) - return false; - - const RegisterBank *CurRegBank = RBI->getRegBank(Reg, *MRI, *TRI); - const RegisterBank *DesiredRegBank = ValMapping.BreakDown[0].RegBank; - // Reg is free of assignment, a simple assignment will make the - // register bank to match. - OnlyAssign = CurRegBank == nullptr; - LLVM_DEBUG(dbgs() << "Does assignment already match: "; - if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none"; - dbgs() << " against "; - assert(DesiredRegBank && "The mapping must be valid"); - dbgs() << *DesiredRegBank << '\n';); - return CurRegBank == DesiredRegBank; -} - -bool RegBankSelect::repairReg( - MachineOperand &MO, const RegisterBankInfo::ValueMapping &ValMapping, - RegBankSelect::RepairingPlacement &RepairPt, - const iterator_range<SmallVectorImpl<Register>::const_iterator> &NewVRegs) { - - assert(ValMapping.NumBreakDowns == (unsigned)size(NewVRegs) && - "need new vreg for each breakdown"); - - // An empty range of new register means no repairing. - assert(!NewVRegs.empty() && "We should not have to repair"); - - MachineInstr *MI; - if (ValMapping.NumBreakDowns == 1) { - // Assume we are repairing a use and thus, the original reg will be - // the source of the repairing. - Register Src = MO.getReg(); - Register Dst = *NewVRegs.begin(); - - // If we repair a definition, swap the source and destination for - // the repairing. - if (MO.isDef()) - std::swap(Src, Dst); - - assert((RepairPt.getNumInsertPoints() == 1 || - Register::isPhysicalRegister(Dst)) && - "We are about to create several defs for Dst"); - - // Build the instruction used to repair, then clone it at the right - // places. Avoiding buildCopy bypasses the check that Src and Dst have the - // same types because the type is a placeholder when this function is called. - MI = MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY) - .addDef(Dst) - .addUse(Src); - LLVM_DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst) - << '\n'); - } else { - // TODO: Support with G_IMPLICIT_DEF + G_INSERT sequence or G_EXTRACT - // sequence. 
- assert(ValMapping.partsAllUniform() && "irregular breakdowns not supported"); - - LLT RegTy = MRI->getType(MO.getReg()); - if (MO.isDef()) { - unsigned MergeOp; - if (RegTy.isVector()) { - if (ValMapping.NumBreakDowns == RegTy.getNumElements()) - MergeOp = TargetOpcode::G_BUILD_VECTOR; - else { - assert( - (ValMapping.BreakDown[0].Length * ValMapping.NumBreakDowns == - RegTy.getSizeInBits()) && - (ValMapping.BreakDown[0].Length % RegTy.getScalarSizeInBits() == - 0) && - "don't understand this value breakdown"); - - MergeOp = TargetOpcode::G_CONCAT_VECTORS; - } - } else - MergeOp = TargetOpcode::G_MERGE_VALUES; - - auto MergeBuilder = - MIRBuilder.buildInstrNoInsert(MergeOp) - .addDef(MO.getReg()); - - for (Register SrcReg : NewVRegs) - MergeBuilder.addUse(SrcReg); - - MI = MergeBuilder; - } else { - MachineInstrBuilder UnMergeBuilder = - MIRBuilder.buildInstrNoInsert(TargetOpcode::G_UNMERGE_VALUES); - for (Register DefReg : NewVRegs) - UnMergeBuilder.addDef(DefReg); - - UnMergeBuilder.addUse(MO.getReg()); - MI = UnMergeBuilder; - } - } - - if (RepairPt.getNumInsertPoints() != 1) - report_fatal_error("need testcase to support multiple insertion points"); - - // TODO: - // Check if MI is legal. if not, we need to legalize all the - // instructions we are going to insert. - std::unique_ptr<MachineInstr *[]> NewInstrs( - new MachineInstr *[RepairPt.getNumInsertPoints()]); - bool IsFirst = true; - unsigned Idx = 0; - for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) { - MachineInstr *CurMI; - if (IsFirst) - CurMI = MI; - else - CurMI = MIRBuilder.getMF().CloneMachineInstr(MI); - InsertPt->insert(*CurMI); - NewInstrs[Idx++] = CurMI; - IsFirst = false; - } - // TODO: - // Legalize NewInstrs if need be. - return true; -} - -uint64_t RegBankSelect::getRepairCost( - const MachineOperand &MO, - const RegisterBankInfo::ValueMapping &ValMapping) const { - assert(MO.isReg() && "We should only repair register operand"); - assert(ValMapping.NumBreakDowns && "Nothing to map??"); - - bool IsSameNumOfValues = ValMapping.NumBreakDowns == 1; - const RegisterBank *CurRegBank = RBI->getRegBank(MO.getReg(), *MRI, *TRI); - // If MO does not have a register bank, we should have just been - // able to set one unless we have to break the value down. - assert(CurRegBank || MO.isDef()); - - // Def: Val <- NewDefs - // Same number of values: copy - // Different number: Val = build_sequence Defs1, Defs2, ... - // Use: NewSources <- Val. - // Same number of values: copy. - // Different number: Src1, Src2, ... = - // extract_value Val, Src1Begin, Src1Len, Src2Begin, Src2Len, ... - // We should remember that this value is available somewhere else to - // coalesce the value. - - if (ValMapping.NumBreakDowns != 1) - return RBI->getBreakDownCost(ValMapping, CurRegBank); - - if (IsSameNumOfValues) { - const RegisterBank *DesiredRegBank = ValMapping.BreakDown[0].RegBank; - // If we repair a definition, swap the source and destination for - // the repairing. - if (MO.isDef()) - std::swap(CurRegBank, DesiredRegBank); - // TODO: It may be possible to actually avoid the copy. - // If we repair something where the source is defined by a copy - // and the source of that copy is on the right bank, we can reuse - // it for free. - // E.g., - // RegToRepair<BankA> = copy AlternativeSrc<BankB> - // = op RegToRepair<BankA> - // We can simply propagate AlternativeSrc instead of copying RegToRepair - // into a new virtual register. - // We would also need to propagate this information in the - // repairing placement. 
- unsigned Cost = RBI->copyCost(*DesiredRegBank, *CurRegBank, - RBI->getSizeInBits(MO.getReg(), *MRI, *TRI)); - // TODO: use a dedicated constant for ImpossibleCost. - if (Cost != std::numeric_limits<unsigned>::max()) - return Cost; - // Return the legalization cost of that repairing. - } - return std::numeric_limits<unsigned>::max(); -} - -const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping( - MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings, - SmallVectorImpl<RepairingPlacement> &RepairPts) { - assert(!PossibleMappings.empty() && - "Do not know how to map this instruction"); - - const RegisterBankInfo::InstructionMapping *BestMapping = nullptr; - MappingCost Cost = MappingCost::ImpossibleCost(); - SmallVector<RepairingPlacement, 4> LocalRepairPts; - for (const RegisterBankInfo::InstructionMapping *CurMapping : - PossibleMappings) { - MappingCost CurCost = - computeMapping(MI, *CurMapping, LocalRepairPts, &Cost); - if (CurCost < Cost) { - LLVM_DEBUG(dbgs() << "New best: " << CurCost << '\n'); - Cost = CurCost; - BestMapping = CurMapping; - RepairPts.clear(); - for (RepairingPlacement &RepairPt : LocalRepairPts) - RepairPts.emplace_back(std::move(RepairPt)); - } - } - if (!BestMapping && !TPC->isGlobalISelAbortEnabled()) { - // If none of the mapping worked that means they are all impossible. - // Thus, pick the first one and set an impossible repairing point. - // It will trigger the failed isel mode. - BestMapping = *PossibleMappings.begin(); - RepairPts.emplace_back( - RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible)); - } else - assert(BestMapping && "No suitable mapping for instruction"); - return *BestMapping; -} - -void RegBankSelect::tryAvoidingSplit( - RegBankSelect::RepairingPlacement &RepairPt, const MachineOperand &MO, - const RegisterBankInfo::ValueMapping &ValMapping) const { - const MachineInstr &MI = *MO.getParent(); - assert(RepairPt.hasSplit() && "We should not have to adjust for split"); - // Splitting should only occur for PHIs or between terminators, - // because we only do local repairing. - assert((MI.isPHI() || MI.isTerminator()) && "Why do we split?"); - - assert(&MI.getOperand(RepairPt.getOpIdx()) == &MO && - "Repairing placement does not match operand"); - - // If we need splitting for phis, that means it is because we - // could not find an insertion point before the terminators of - // the predecessor block for this argument. In other words, - // the input value is defined by one of the terminators. - assert((!MI.isPHI() || !MO.isDef()) && "Need split for phi def?"); - - // We split to repair the use of a phi or a terminator. - if (!MO.isDef()) { - if (MI.isTerminator()) { - assert(&MI != &(*MI.getParent()->getFirstTerminator()) && - "Need to split for the first terminator?!"); - } else { - // For the PHI case, the split may not be actually required. - // In the copy case, a phi is already a copy on the incoming edge, - // therefore there is no need to split. - if (ValMapping.NumBreakDowns == 1) - // This is a already a copy, there is nothing to do. - RepairPt.switchTo(RepairingPlacement::RepairingKind::Reassign); - } - return; - } - - // At this point, we need to repair a defintion of a terminator. - - // Technically we need to fix the def of MI on all outgoing - // edges of MI to keep the repairing local. In other words, we - // will create several definitions of the same register. This - // does not work for SSA unless that definition is a physical - // register. 
- // However, there are other cases where we can get away with - // that while still keeping the repairing local. - assert(MI.isTerminator() && MO.isDef() && - "This code is for the def of a terminator"); - - // Since we use RPO traversal, if we need to repair a definition - // this means this definition could be: - // 1. Used by PHIs (i.e., this VReg has been visited as part of the - // uses of a phi.), or - // 2. Part of a target specific instruction (i.e., the target applied - // some register class constraints when creating the instruction.) - // If the constraints come for #2, the target said that another mapping - // is supported so we may just drop them. Indeed, if we do not change - // the number of registers holding that value, the uses will get fixed - // when we get to them. - // Uses in PHIs may have already been proceeded though. - // If the constraints come for #1, then, those are weak constraints and - // no actual uses may rely on them. However, the problem remains mainly - // the same as for #2. If the value stays in one register, we could - // just switch the register bank of the definition, but we would need to - // account for a repairing cost for each phi we silently change. - // - // In any case, if the value needs to be broken down into several - // registers, the repairing is not local anymore as we need to patch - // every uses to rebuild the value in just one register. - // - // To summarize: - // - If the value is in a physical register, we can do the split and - // fix locally. - // Otherwise if the value is in a virtual register: - // - If the value remains in one register, we do not have to split - // just switching the register bank would do, but we need to account - // in the repairing cost all the phi we changed. - // - If the value spans several registers, then we cannot do a local - // repairing. - - // Check if this is a physical or virtual register. - Register Reg = MO.getReg(); - if (Register::isPhysicalRegister(Reg)) { - // We are going to split every outgoing edges. - // Check that this is possible. - // FIXME: The machine representation is currently broken - // since it also several terminators in one basic block. - // Because of that we would technically need a way to get - // the targets of just one terminator to know which edges - // we have to split. - // Assert that we do not hit the ill-formed representation. - - // If there are other terminators before that one, some of - // the outgoing edges may not be dominated by this definition. - assert(&MI == &(*MI.getParent()->getFirstTerminator()) && - "Do not know which outgoing edges are relevant"); - const MachineInstr *Next = MI.getNextNode(); - assert((!Next || Next->isUnconditionalBranch()) && - "Do not know where each terminator ends up"); - if (Next) - // If the next terminator uses Reg, this means we have - // to split right after MI and thus we need a way to ask - // which outgoing edges are affected. - assert(!Next->readsRegister(Reg) && "Need to split between terminators"); - // We will split all the edges and repair there. - } else { - // This is a virtual register defined by a terminator. - if (ValMapping.NumBreakDowns == 1) { - // There is nothing to repair, but we may actually lie on - // the repairing cost because of the PHIs already proceeded - // as already stated. - // Though the code will be correct. - assert(false && "Repairing cost may not be accurate"); - } else { - // We need to do non-local repairing. Basically, patch all - // the uses (i.e., phis) that we already proceeded. 
- // For now, just say this mapping is not possible. - RepairPt.switchTo(RepairingPlacement::RepairingKind::Impossible); - } - } -} - -RegBankSelect::MappingCost RegBankSelect::computeMapping( - MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping, - SmallVectorImpl<RepairingPlacement> &RepairPts, - const RegBankSelect::MappingCost *BestCost) { - assert((MBFI || !BestCost) && "Costs comparison require MBFI"); - - if (!InstrMapping.isValid()) - return MappingCost::ImpossibleCost(); - - // If mapped with InstrMapping, MI will have the recorded cost. - MappingCost Cost(MBFI ? MBFI->getBlockFreq(MI.getParent()) : 1); - bool Saturated = Cost.addLocalCost(InstrMapping.getCost()); - assert(!Saturated && "Possible mapping saturated the cost"); - LLVM_DEBUG(dbgs() << "Evaluating mapping cost for: " << MI); - LLVM_DEBUG(dbgs() << "With: " << InstrMapping << '\n'); - RepairPts.clear(); - if (BestCost && Cost > *BestCost) { - LLVM_DEBUG(dbgs() << "Mapping is too expensive from the start\n"); - return Cost; - } - - // Moreover, to realize this mapping, the register bank of each operand must - // match this mapping. In other words, we may need to locally reassign the - // register banks. Account for that repairing cost as well. - // In this context, local means in the surrounding of MI. - for (unsigned OpIdx = 0, EndOpIdx = InstrMapping.getNumOperands(); - OpIdx != EndOpIdx; ++OpIdx) { - const MachineOperand &MO = MI.getOperand(OpIdx); - if (!MO.isReg()) - continue; - Register Reg = MO.getReg(); - if (!Reg) - continue; - LLVM_DEBUG(dbgs() << "Opd" << OpIdx << '\n'); - const RegisterBankInfo::ValueMapping &ValMapping = - InstrMapping.getOperandMapping(OpIdx); - // If Reg is already properly mapped, this is free. - bool Assign; - if (assignmentMatch(Reg, ValMapping, Assign)) { - LLVM_DEBUG(dbgs() << "=> is free (match).\n"); - continue; - } - if (Assign) { - LLVM_DEBUG(dbgs() << "=> is free (simple assignment).\n"); - RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this, - RepairingPlacement::Reassign)); - continue; - } - - // Find the insertion point for the repairing code. - RepairPts.emplace_back( - RepairingPlacement(MI, OpIdx, *TRI, *this, RepairingPlacement::Insert)); - RepairingPlacement &RepairPt = RepairPts.back(); - - // If we need to split a basic block to materialize this insertion point, - // we may give a higher cost to this mapping. - // Nevertheless, we may get away with the split, so try that first. - if (RepairPt.hasSplit()) - tryAvoidingSplit(RepairPt, MO, ValMapping); - - // Check that the materialization of the repairing is possible. - if (!RepairPt.canMaterialize()) { - LLVM_DEBUG(dbgs() << "Mapping involves impossible repairing\n"); - return MappingCost::ImpossibleCost(); - } - - // Account for the split cost and repair cost. - // Unless the cost is already saturated or we do not care about the cost. - if (!BestCost || Saturated) - continue; - - // To get accurate information we need MBFI and MBPI. - // Thus, if we end up here this information should be here. - assert(MBFI && MBPI && "Cost computation requires MBFI and MBPI"); - - // FIXME: We will have to rework the repairing cost model. - // The repairing cost depends on the register bank that MO has. - // However, when we break down the value into different values, - // MO may not have a register bank while still needing repairing. - // For the fast mode, we don't compute the cost so that is fine, - // but still for the repairing code, we will have to make a choice. 
- // For the greedy mode, we should choose greedily what is the best - // choice based on the next use of MO. - - // Sums up the repairing cost of MO at each insertion point. - uint64_t RepairCost = getRepairCost(MO, ValMapping); - - // This is an impossible to repair cost. - if (RepairCost == std::numeric_limits<unsigned>::max()) - return MappingCost::ImpossibleCost(); - - // Bias used for splitting: 5%. - const uint64_t PercentageForBias = 5; - uint64_t Bias = (RepairCost * PercentageForBias + 99) / 100; - // We should not need more than a couple of instructions to repair - // an assignment. In other words, the computation should not - // overflow because the repairing cost is free of basic block - // frequency. - assert(((RepairCost < RepairCost * PercentageForBias) && - (RepairCost * PercentageForBias < - RepairCost * PercentageForBias + 99)) && - "Repairing involves more than a billion of instructions?!"); - for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) { - assert(InsertPt->canMaterialize() && "We should not have made it here"); - // We will applied some basic block frequency and those uses uint64_t. - if (!InsertPt->isSplit()) - Saturated = Cost.addLocalCost(RepairCost); - else { - uint64_t CostForInsertPt = RepairCost; - // Again we shouldn't overflow here givent that - // CostForInsertPt is frequency free at this point. - assert(CostForInsertPt + Bias > CostForInsertPt && - "Repairing + split bias overflows"); - CostForInsertPt += Bias; - uint64_t PtCost = InsertPt->frequency(*this) * CostForInsertPt; - // Check if we just overflowed. - if ((Saturated = PtCost < CostForInsertPt)) - Cost.saturate(); - else - Saturated = Cost.addNonLocalCost(PtCost); - } - - // Stop looking into what it takes to repair, this is already - // too expensive. - if (BestCost && Cost > *BestCost) { - LLVM_DEBUG(dbgs() << "Mapping is too expensive, stop processing\n"); - return Cost; - } - - // No need to accumulate more cost information. - // We need to still gather the repairing information though. - if (Saturated) - break; - } - } - LLVM_DEBUG(dbgs() << "Total cost is: " << Cost << "\n"); - return Cost; -} - -bool RegBankSelect::applyMapping( - MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping, - SmallVectorImpl<RegBankSelect::RepairingPlacement> &RepairPts) { - // OpdMapper will hold all the information needed for the rewriting. - RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, *MRI); - - // First, place the repairing code. - for (RepairingPlacement &RepairPt : RepairPts) { - if (!RepairPt.canMaterialize() || - RepairPt.getKind() == RepairingPlacement::Impossible) - return false; - assert(RepairPt.getKind() != RepairingPlacement::None && - "This should not make its way in the list"); - unsigned OpIdx = RepairPt.getOpIdx(); - MachineOperand &MO = MI.getOperand(OpIdx); - const RegisterBankInfo::ValueMapping &ValMapping = - InstrMapping.getOperandMapping(OpIdx); - Register Reg = MO.getReg(); - - switch (RepairPt.getKind()) { - case RepairingPlacement::Reassign: - assert(ValMapping.NumBreakDowns == 1 && - "Reassignment should only be for simple mapping"); - MRI->setRegBank(Reg, *ValMapping.BreakDown[0].RegBank); - break; - case RepairingPlacement::Insert: - OpdMapper.createVRegs(OpIdx); - if (!repairReg(MO, ValMapping, RepairPt, OpdMapper.getVRegs(OpIdx))) - return false; - break; - default: - llvm_unreachable("Other kind should not happen"); - } - } - - // Second, rewrite the instruction. 
- LLVM_DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n'); - RBI->applyMapping(OpdMapper); - - return true; -} - -bool RegBankSelect::assignInstr(MachineInstr &MI) { - LLVM_DEBUG(dbgs() << "Assign: " << MI); - // Remember the repairing placement for all the operands. - SmallVector<RepairingPlacement, 4> RepairPts; - - const RegisterBankInfo::InstructionMapping *BestMapping; - if (OptMode == RegBankSelect::Mode::Fast) { - BestMapping = &RBI->getInstrMapping(MI); - MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts); - (void)DefaultCost; - if (DefaultCost == MappingCost::ImpossibleCost()) - return false; - } else { - RegisterBankInfo::InstructionMappings PossibleMappings = - RBI->getInstrPossibleMappings(MI); - if (PossibleMappings.empty()) - return false; - BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts); - } - // Make sure the mapping is valid for MI. - assert(BestMapping->verify(MI) && "Invalid instruction mapping"); - - LLVM_DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n'); - - // After this call, MI may not be valid anymore. - // Do not use it. - return applyMapping(MI, *BestMapping, RepairPts); -} - -bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) { - // If the ISel pipeline failed, do not bother running that pass. - if (MF.getProperties().hasProperty( - MachineFunctionProperties::Property::FailedISel)) - return false; - - LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n'); - const Function &F = MF.getFunction(); - Mode SaveOptMode = OptMode; - if (F.hasOptNone()) - OptMode = Mode::Fast; - init(MF); - -#ifndef NDEBUG - // Check that our input is fully legal: we require the function to have the - // Legalized property, so it should be. - // FIXME: This should be in the MachineVerifier. - if (!DisableGISelLegalityCheck) - if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) { - reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect", - "instruction is not legal", *MI); - return false; - } -#endif - - // Walk the function and assign register banks to all operands. - // Use a RPOT to make sure all registers are assigned before we choose - // the best mapping of the current instruction. - ReversePostOrderTraversal<MachineFunction*> RPOT(&MF); - for (MachineBasicBlock *MBB : RPOT) { - // Set a sensible insertion point so that subsequent calls to - // MIRBuilder. - MIRBuilder.setMBB(*MBB); - for (MachineBasicBlock::iterator MII = MBB->begin(), End = MBB->end(); - MII != End;) { - // MI might be invalidated by the assignment, so move the - // iterator before hand. - MachineInstr &MI = *MII++; - - // Ignore target-specific post-isel instructions: they should use proper - // regclasses. - if (isTargetSpecificOpcode(MI.getOpcode()) && !MI.isPreISelOpcode()) - continue; - - // Ignore inline asm instructions: they should use physical - // registers/regclasses - if (MI.isInlineAsm()) - continue; - - // Ignore debug info. - if (MI.isDebugInstr()) - continue; - - if (!assignInstr(MI)) { - reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect", - "unable to map instruction", MI); - return false; - } - - // It's possible the mapping changed control flow, and moved the following - // instruction to a new block, so figure out the new parent. 
- if (MII != End) { - MachineBasicBlock *NextInstBB = MII->getParent(); - if (NextInstBB != MBB) { - LLVM_DEBUG(dbgs() << "Instruction mapping changed control flow\n"); - MBB = NextInstBB; - MIRBuilder.setMBB(*MBB); - End = MBB->end(); - } - } - } - } - - OptMode = SaveOptMode; - return false; -} - -//------------------------------------------------------------------------------ -// Helper Classes Implementation -//------------------------------------------------------------------------------ -RegBankSelect::RepairingPlacement::RepairingPlacement( - MachineInstr &MI, unsigned OpIdx, const TargetRegisterInfo &TRI, Pass &P, - RepairingPlacement::RepairingKind Kind) - // Default is, we are going to insert code to repair OpIdx. - : Kind(Kind), OpIdx(OpIdx), - CanMaterialize(Kind != RepairingKind::Impossible), P(P) { - const MachineOperand &MO = MI.getOperand(OpIdx); - assert(MO.isReg() && "Trying to repair a non-reg operand"); - - if (Kind != RepairingKind::Insert) - return; - - // Repairings for definitions happen after MI, uses happen before. - bool Before = !MO.isDef(); - - // Check if we are done with MI. - if (!MI.isPHI() && !MI.isTerminator()) { - addInsertPoint(MI, Before); - // We are done with the initialization. - return; - } - - // Now, look for the special cases. - if (MI.isPHI()) { - // - PHI must be the first instructions: - // * Before, we have to split the related incoming edge. - // * After, move the insertion point past the last phi. - if (!Before) { - MachineBasicBlock::iterator It = MI.getParent()->getFirstNonPHI(); - if (It != MI.getParent()->end()) - addInsertPoint(*It, /*Before*/ true); - else - addInsertPoint(*(--It), /*Before*/ false); - return; - } - // We repair a use of a phi, we may need to split the related edge. - MachineBasicBlock &Pred = *MI.getOperand(OpIdx + 1).getMBB(); - // Check if we can move the insertion point prior to the - // terminators of the predecessor. - Register Reg = MO.getReg(); - MachineBasicBlock::iterator It = Pred.getLastNonDebugInstr(); - for (auto Begin = Pred.begin(); It != Begin && It->isTerminator(); --It) - if (It->modifiesRegister(Reg, &TRI)) { - // We cannot hoist the repairing code in the predecessor. - // Split the edge. - addInsertPoint(Pred, *MI.getParent()); - return; - } - // At this point, we can insert in Pred. - - // - If It is invalid, Pred is empty and we can insert in Pred - // wherever we want. - // - If It is valid, It is the first non-terminator, insert after It. - if (It == Pred.end()) - addInsertPoint(Pred, /*Beginning*/ false); - else - addInsertPoint(*It, /*Before*/ false); - } else { - // - Terminators must be the last instructions: - // * Before, move the insert point before the first terminator. - // * After, we have to split the outcoming edges. - if (Before) { - // Check whether Reg is defined by any terminator. - MachineBasicBlock::reverse_iterator It = MI; - auto REnd = MI.getParent()->rend(); - - for (; It != REnd && It->isTerminator(); ++It) { - assert(!It->modifiesRegister(MO.getReg(), &TRI) && - "copy insertion in middle of terminators not handled"); - } - - if (It == REnd) { - addInsertPoint(*MI.getParent()->begin(), true); - return; - } - - // We are sure to be right before the first terminator. - addInsertPoint(*It, /*Before*/ false); - return; - } - // Make sure Reg is not redefined by other terminators, otherwise - // we do not know how to split. - for (MachineBasicBlock::iterator It = MI, End = MI.getParent()->end(); - ++It != End;) - // The machine verifier should reject this kind of code. 
- assert(It->modifiesRegister(MO.getReg(), &TRI) && - "Do not know where to split"); - // Split each outcoming edges. - MachineBasicBlock &Src = *MI.getParent(); - for (auto &Succ : Src.successors()) - addInsertPoint(Src, Succ); - } -} - -void RegBankSelect::RepairingPlacement::addInsertPoint(MachineInstr &MI, - bool Before) { - addInsertPoint(*new InstrInsertPoint(MI, Before)); -} - -void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &MBB, - bool Beginning) { - addInsertPoint(*new MBBInsertPoint(MBB, Beginning)); -} - -void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &Src, - MachineBasicBlock &Dst) { - addInsertPoint(*new EdgeInsertPoint(Src, Dst, P)); -} - -void RegBankSelect::RepairingPlacement::addInsertPoint( - RegBankSelect::InsertPoint &Point) { - CanMaterialize &= Point.canMaterialize(); - HasSplit |= Point.isSplit(); - InsertPoints.emplace_back(&Point); -} - -RegBankSelect::InstrInsertPoint::InstrInsertPoint(MachineInstr &Instr, - bool Before) - : InsertPoint(), Instr(Instr), Before(Before) { - // Since we do not support splitting, we do not need to update - // liveness and such, so do not do anything with P. - assert((!Before || !Instr.isPHI()) && - "Splitting before phis requires more points"); - assert((!Before || !Instr.getNextNode() || !Instr.getNextNode()->isPHI()) && - "Splitting between phis does not make sense"); -} - -void RegBankSelect::InstrInsertPoint::materialize() { - if (isSplit()) { - // Slice and return the beginning of the new block. - // If we need to split between the terminators, we theoritically - // need to know where the first and second set of terminators end - // to update the successors properly. - // Now, in pratice, we should have a maximum of 2 branch - // instructions; one conditional and one unconditional. Therefore - // we know how to update the successor by looking at the target of - // the unconditional branch. - // If we end up splitting at some point, then, we should update - // the liveness information and such. I.e., we would need to - // access P here. - // The machine verifier should actually make sure such cases - // cannot happen. - llvm_unreachable("Not yet implemented"); - } - // Otherwise the insertion point is just the current or next - // instruction depending on Before. I.e., there is nothing to do - // here. -} - -bool RegBankSelect::InstrInsertPoint::isSplit() const { - // If the insertion point is after a terminator, we need to split. - if (!Before) - return Instr.isTerminator(); - // If we insert before an instruction that is after a terminator, - // we are still after a terminator. - return Instr.getPrevNode() && Instr.getPrevNode()->isTerminator(); -} - -uint64_t RegBankSelect::InstrInsertPoint::frequency(const Pass &P) const { - // Even if we need to split, because we insert between terminators, - // this split has actually the same frequency as the instruction. 
- const MachineBlockFrequencyInfo *MBFI = - P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>(); - if (!MBFI) - return 1; - return MBFI->getBlockFreq(Instr.getParent()).getFrequency(); -} - -uint64_t RegBankSelect::MBBInsertPoint::frequency(const Pass &P) const { - const MachineBlockFrequencyInfo *MBFI = - P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>(); - if (!MBFI) - return 1; - return MBFI->getBlockFreq(&MBB).getFrequency(); -} - -void RegBankSelect::EdgeInsertPoint::materialize() { - // If we end up repairing twice at the same place before materializing the - // insertion point, we may think we have to split an edge twice. - // We should have a factory for the insert point such that identical points - // are the same instance. - assert(Src.isSuccessor(DstOrSplit) && DstOrSplit->isPredecessor(&Src) && - "This point has already been split"); - MachineBasicBlock *NewBB = Src.SplitCriticalEdge(DstOrSplit, P); - assert(NewBB && "Invalid call to materialize"); - // We reuse the destination block to hold the information of the new block. - DstOrSplit = NewBB; -} - -uint64_t RegBankSelect::EdgeInsertPoint::frequency(const Pass &P) const { - const MachineBlockFrequencyInfo *MBFI = - P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>(); - if (!MBFI) - return 1; - if (WasMaterialized) - return MBFI->getBlockFreq(DstOrSplit).getFrequency(); - - const MachineBranchProbabilityInfo *MBPI = - P.getAnalysisIfAvailable<MachineBranchProbabilityInfo>(); - if (!MBPI) - return 1; - // The basic block will be on the edge. - return (MBFI->getBlockFreq(&Src) * MBPI->getEdgeProbability(&Src, DstOrSplit)) - .getFrequency(); -} - -bool RegBankSelect::EdgeInsertPoint::canMaterialize() const { - // If this is not a critical edge, we should not have used this insert - // point. Indeed, either the successor or the predecessor should - // have do. - assert(Src.succ_size() > 1 && DstOrSplit->pred_size() > 1 && - "Edge is not critical"); - return Src.canSplitCriticalEdge(DstOrSplit); -} - -RegBankSelect::MappingCost::MappingCost(const BlockFrequency &LocalFreq) - : LocalFreq(LocalFreq.getFrequency()) {} - -bool RegBankSelect::MappingCost::addLocalCost(uint64_t Cost) { - // Check if this overflows. - if (LocalCost + Cost < LocalCost) { - saturate(); - return true; - } - LocalCost += Cost; - return isSaturated(); -} - -bool RegBankSelect::MappingCost::addNonLocalCost(uint64_t Cost) { - // Check if this overflows. - if (NonLocalCost + Cost < NonLocalCost) { - saturate(); - return true; - } - NonLocalCost += Cost; - return isSaturated(); -} - -bool RegBankSelect::MappingCost::isSaturated() const { - return LocalCost == UINT64_MAX - 1 && NonLocalCost == UINT64_MAX && - LocalFreq == UINT64_MAX; -} - -void RegBankSelect::MappingCost::saturate() { - *this = ImpossibleCost(); - --LocalCost; -} - -RegBankSelect::MappingCost RegBankSelect::MappingCost::ImpossibleCost() { - return MappingCost(UINT64_MAX, UINT64_MAX, UINT64_MAX); -} - -bool RegBankSelect::MappingCost::operator<(const MappingCost &Cost) const { - // Sort out the easy cases. - if (*this == Cost) - return false; - // If one is impossible to realize the other is cheaper unless it is - // impossible as well. - if ((*this == ImpossibleCost()) || (Cost == ImpossibleCost())) - return (*this == ImpossibleCost()) < (Cost == ImpossibleCost()); - // If one is saturated the other is cheaper, unless it is saturated - // as well. 
- if (isSaturated() || Cost.isSaturated()) - return isSaturated() < Cost.isSaturated(); - // At this point we know both costs hold sensible values. - - // If both values have a different base frequency, there is no much - // we can do but to scale everything. - // However, if they have the same base frequency we can avoid making - // complicated computation. - uint64_t ThisLocalAdjust; - uint64_t OtherLocalAdjust; - if (LLVM_LIKELY(LocalFreq == Cost.LocalFreq)) { - - // At this point, we know the local costs are comparable. - // Do the case that do not involve potential overflow first. - if (NonLocalCost == Cost.NonLocalCost) - // Since the non-local costs do not discriminate on the result, - // just compare the local costs. - return LocalCost < Cost.LocalCost; - - // The base costs are comparable so we may only keep the relative - // value to increase our chances of avoiding overflows. - ThisLocalAdjust = 0; - OtherLocalAdjust = 0; - if (LocalCost < Cost.LocalCost) - OtherLocalAdjust = Cost.LocalCost - LocalCost; - else - ThisLocalAdjust = LocalCost - Cost.LocalCost; - } else { - ThisLocalAdjust = LocalCost; - OtherLocalAdjust = Cost.LocalCost; - } - - // The non-local costs are comparable, just keep the relative value. - uint64_t ThisNonLocalAdjust = 0; - uint64_t OtherNonLocalAdjust = 0; - if (NonLocalCost < Cost.NonLocalCost) - OtherNonLocalAdjust = Cost.NonLocalCost - NonLocalCost; - else - ThisNonLocalAdjust = NonLocalCost - Cost.NonLocalCost; - // Scale everything to make them comparable. - uint64_t ThisScaledCost = ThisLocalAdjust * LocalFreq; - // Check for overflow on that operation. - bool ThisOverflows = ThisLocalAdjust && (ThisScaledCost < ThisLocalAdjust || - ThisScaledCost < LocalFreq); - uint64_t OtherScaledCost = OtherLocalAdjust * Cost.LocalFreq; - // Check for overflow on the last operation. - bool OtherOverflows = - OtherLocalAdjust && - (OtherScaledCost < OtherLocalAdjust || OtherScaledCost < Cost.LocalFreq); - // Add the non-local costs. - ThisOverflows |= ThisNonLocalAdjust && - ThisScaledCost + ThisNonLocalAdjust < ThisNonLocalAdjust; - ThisScaledCost += ThisNonLocalAdjust; - OtherOverflows |= OtherNonLocalAdjust && - OtherScaledCost + OtherNonLocalAdjust < OtherNonLocalAdjust; - OtherScaledCost += OtherNonLocalAdjust; - // If both overflows, we cannot compare without additional - // precision, e.g., APInt. Just give up on that case. - if (ThisOverflows && OtherOverflows) - return false; - // If one overflows but not the other, we can still compare. - if (ThisOverflows || OtherOverflows) - return ThisOverflows < OtherOverflows; - // Otherwise, just compare the values. - return ThisScaledCost < OtherScaledCost; -} - -bool RegBankSelect::MappingCost::operator==(const MappingCost &Cost) const { - return LocalCost == Cost.LocalCost && NonLocalCost == Cost.NonLocalCost && - LocalFreq == Cost.LocalFreq; -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegBankSelect::MappingCost::dump() const { - print(dbgs()); - dbgs() << '\n'; -} -#endif - -void RegBankSelect::MappingCost::print(raw_ostream &OS) const { - if (*this == ImpossibleCost()) { - OS << "impossible"; - return; - } - if (isSaturated()) { - OS << "saturated"; - return; - } - OS << LocalFreq << " * " << LocalCost << " + " << NonLocalCost; -} +//==- llvm/CodeGen/GlobalISel/RegBankSelect.cpp - RegBankSelect --*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the RegBankSelect class.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#define DEBUG_TYPE "regbankselect"
+
+using namespace llvm;
+
+static cl::opt<RegBankSelect::Mode> RegBankSelectMode(
+    cl::desc("Mode of the RegBankSelect pass"), cl::Hidden, cl::Optional,
+    cl::values(clEnumValN(RegBankSelect::Mode::Fast, "regbankselect-fast",
+                          "Run the Fast mode (default mapping)"),
+               clEnumValN(RegBankSelect::Mode::Greedy, "regbankselect-greedy",
+                          "Use the Greedy mode (best local mapping)")));
+
+char RegBankSelect::ID = 0;
+
+INITIALIZE_PASS_BEGIN(RegBankSelect, DEBUG_TYPE,
+                      "Assign register bank of generic virtual registers",
+                      false, false);
+INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE,
+                    "Assign register bank of generic virtual registers", false,
+                    false)
+
+RegBankSelect::RegBankSelect(Mode RunningMode)
+    : MachineFunctionPass(ID), OptMode(RunningMode) {
+  if (RegBankSelectMode.getNumOccurrences() != 0) {
+    OptMode = RegBankSelectMode;
+    if (RegBankSelectMode != RunningMode)
+      LLVM_DEBUG(dbgs() << "RegBankSelect mode overridden by command line\n");
+  }
+}
+
+void RegBankSelect::init(MachineFunction &MF) {
+  RBI = MF.getSubtarget().getRegBankInfo();
+  assert(RBI && "Cannot work without RegisterBankInfo");
+  MRI = &MF.getRegInfo();
+  TRI = MF.getSubtarget().getRegisterInfo();
+  TPC = &getAnalysis<TargetPassConfig>();
+  if (OptMode != Mode::Fast) {
+    MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+    MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+  } else {
+    MBFI = nullptr;
+    MBPI = nullptr;
+  }
+  MIRBuilder.setMF(MF);
+  MORE = std::make_unique<MachineOptimizationRemarkEmitter>(MF, MBFI);
+}
+
+void RegBankSelect::getAnalysisUsage(AnalysisUsage &AU) const {
+  if (OptMode != Mode::Fast) {
+    // We could preserve the information from these two analyses but
+    // the APIs do not allow us to do so yet.
+    AU.addRequired<MachineBlockFrequencyInfo>();
+    AU.addRequired<MachineBranchProbabilityInfo>();
+  }
+  AU.addRequired<TargetPassConfig>();
+  getSelectionDAGFallbackAnalysisUsage(AU);
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool RegBankSelect::assignmentMatch(
+    Register Reg, const RegisterBankInfo::ValueMapping &ValMapping,
+    bool &OnlyAssign) const {
+  // By default we assume we will have to repair something.
+  OnlyAssign = false;
+  // Each part of a breakdown needs to end up in a different register.
+  // In other words, the assignment of Reg does not match.
+  if (ValMapping.NumBreakDowns != 1)
+    return false;
+
+  const RegisterBank *CurRegBank = RBI->getRegBank(Reg, *MRI, *TRI);
+  const RegisterBank *DesiredRegBank = ValMapping.BreakDown[0].RegBank;
+  // If Reg is free of assignment, a simple assignment will make the
+  // register bank match.
+  OnlyAssign = CurRegBank == nullptr;
+  LLVM_DEBUG(dbgs() << "Does assignment already match: ";
+             if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
+             dbgs() << " against ";
+             assert(DesiredRegBank && "The mapping must be valid");
+             dbgs() << *DesiredRegBank << '\n';);
+  return CurRegBank == DesiredRegBank;
+}
+
+bool RegBankSelect::repairReg(
+    MachineOperand &MO, const RegisterBankInfo::ValueMapping &ValMapping,
+    RegBankSelect::RepairingPlacement &RepairPt,
+    const iterator_range<SmallVectorImpl<Register>::const_iterator> &NewVRegs) {
+
+  assert(ValMapping.NumBreakDowns == (unsigned)size(NewVRegs) &&
+         "need new vreg for each breakdown");
+
+  // An empty range of new registers means no repairing.
+  assert(!NewVRegs.empty() && "We should not have to repair");
+
+  MachineInstr *MI;
+  if (ValMapping.NumBreakDowns == 1) {
+    // Assume we are repairing a use and thus, the original reg will be
+    // the source of the repairing.
+    Register Src = MO.getReg();
+    Register Dst = *NewVRegs.begin();
+
+    // If we repair a definition, swap the source and destination for
+    // the repairing.
+    if (MO.isDef())
+      std::swap(Src, Dst);
+
+    assert((RepairPt.getNumInsertPoints() == 1 ||
+            Register::isPhysicalRegister(Dst)) &&
+           "We are about to create several defs for Dst");
+
+    // Build the instruction used to repair, then clone it at the right
+    // places. Avoiding buildCopy bypasses the check that Src and Dst have
+    // the same types, because the type is a placeholder when this function
+    // is called.
+    MI = MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY)
+             .addDef(Dst)
+             .addUse(Src);
+    LLVM_DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst)
+                      << '\n');
+  } else {
+    // TODO: Support with G_IMPLICIT_DEF + G_INSERT sequence or G_EXTRACT
+    // sequence.
+ assert(ValMapping.partsAllUniform() && "irregular breakdowns not supported"); + + LLT RegTy = MRI->getType(MO.getReg()); + if (MO.isDef()) { + unsigned MergeOp; + if (RegTy.isVector()) { + if (ValMapping.NumBreakDowns == RegTy.getNumElements()) + MergeOp = TargetOpcode::G_BUILD_VECTOR; + else { + assert( + (ValMapping.BreakDown[0].Length * ValMapping.NumBreakDowns == + RegTy.getSizeInBits()) && + (ValMapping.BreakDown[0].Length % RegTy.getScalarSizeInBits() == + 0) && + "don't understand this value breakdown"); + + MergeOp = TargetOpcode::G_CONCAT_VECTORS; + } + } else + MergeOp = TargetOpcode::G_MERGE_VALUES; + + auto MergeBuilder = + MIRBuilder.buildInstrNoInsert(MergeOp) + .addDef(MO.getReg()); + + for (Register SrcReg : NewVRegs) + MergeBuilder.addUse(SrcReg); + + MI = MergeBuilder; + } else { + MachineInstrBuilder UnMergeBuilder = + MIRBuilder.buildInstrNoInsert(TargetOpcode::G_UNMERGE_VALUES); + for (Register DefReg : NewVRegs) + UnMergeBuilder.addDef(DefReg); + + UnMergeBuilder.addUse(MO.getReg()); + MI = UnMergeBuilder; + } + } + + if (RepairPt.getNumInsertPoints() != 1) + report_fatal_error("need testcase to support multiple insertion points"); + + // TODO: + // Check if MI is legal. if not, we need to legalize all the + // instructions we are going to insert. + std::unique_ptr<MachineInstr *[]> NewInstrs( + new MachineInstr *[RepairPt.getNumInsertPoints()]); + bool IsFirst = true; + unsigned Idx = 0; + for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) { + MachineInstr *CurMI; + if (IsFirst) + CurMI = MI; + else + CurMI = MIRBuilder.getMF().CloneMachineInstr(MI); + InsertPt->insert(*CurMI); + NewInstrs[Idx++] = CurMI; + IsFirst = false; + } + // TODO: + // Legalize NewInstrs if need be. + return true; +} + +uint64_t RegBankSelect::getRepairCost( + const MachineOperand &MO, + const RegisterBankInfo::ValueMapping &ValMapping) const { + assert(MO.isReg() && "We should only repair register operand"); + assert(ValMapping.NumBreakDowns && "Nothing to map??"); + + bool IsSameNumOfValues = ValMapping.NumBreakDowns == 1; + const RegisterBank *CurRegBank = RBI->getRegBank(MO.getReg(), *MRI, *TRI); + // If MO does not have a register bank, we should have just been + // able to set one unless we have to break the value down. + assert(CurRegBank || MO.isDef()); + + // Def: Val <- NewDefs + // Same number of values: copy + // Different number: Val = build_sequence Defs1, Defs2, ... + // Use: NewSources <- Val. + // Same number of values: copy. + // Different number: Src1, Src2, ... = + // extract_value Val, Src1Begin, Src1Len, Src2Begin, Src2Len, ... + // We should remember that this value is available somewhere else to + // coalesce the value. + + if (ValMapping.NumBreakDowns != 1) + return RBI->getBreakDownCost(ValMapping, CurRegBank); + + if (IsSameNumOfValues) { + const RegisterBank *DesiredRegBank = ValMapping.BreakDown[0].RegBank; + // If we repair a definition, swap the source and destination for + // the repairing. + if (MO.isDef()) + std::swap(CurRegBank, DesiredRegBank); + // TODO: It may be possible to actually avoid the copy. + // If we repair something where the source is defined by a copy + // and the source of that copy is on the right bank, we can reuse + // it for free. + // E.g., + // RegToRepair<BankA> = copy AlternativeSrc<BankB> + // = op RegToRepair<BankA> + // We can simply propagate AlternativeSrc instead of copying RegToRepair + // into a new virtual register. + // We would also need to propagate this information in the + // repairing placement. 
+    unsigned Cost = RBI->copyCost(*DesiredRegBank, *CurRegBank,
+                                  RBI->getSizeInBits(MO.getReg(), *MRI, *TRI));
+    // TODO: use a dedicated constant for ImpossibleCost.
+    if (Cost != std::numeric_limits<unsigned>::max())
+      return Cost;
+    // Return the legalization cost of that repairing.
+  }
+  return std::numeric_limits<unsigned>::max();
+}
+
+const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
+    MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings,
+    SmallVectorImpl<RepairingPlacement> &RepairPts) {
+  assert(!PossibleMappings.empty() &&
+         "Do not know how to map this instruction");
+
+  const RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
+  MappingCost Cost = MappingCost::ImpossibleCost();
+  SmallVector<RepairingPlacement, 4> LocalRepairPts;
+  for (const RegisterBankInfo::InstructionMapping *CurMapping :
+       PossibleMappings) {
+    MappingCost CurCost =
+        computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
+    if (CurCost < Cost) {
+      LLVM_DEBUG(dbgs() << "New best: " << CurCost << '\n');
+      Cost = CurCost;
+      BestMapping = CurMapping;
+      RepairPts.clear();
+      for (RepairingPlacement &RepairPt : LocalRepairPts)
+        RepairPts.emplace_back(std::move(RepairPt));
+    }
+  }
+  if (!BestMapping && !TPC->isGlobalISelAbortEnabled()) {
+    // If none of the mappings worked, that means they are all impossible.
+    // Thus, pick the first one and set an impossible repairing point.
+    // It will trigger the failed isel mode.
+    BestMapping = *PossibleMappings.begin();
+    RepairPts.emplace_back(
+        RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible));
+  } else
+    assert(BestMapping && "No suitable mapping for instruction");
+  return *BestMapping;
+}
+
+void RegBankSelect::tryAvoidingSplit(
+    RegBankSelect::RepairingPlacement &RepairPt, const MachineOperand &MO,
+    const RegisterBankInfo::ValueMapping &ValMapping) const {
+  const MachineInstr &MI = *MO.getParent();
+  assert(RepairPt.hasSplit() && "We should not have to adjust for split");
+  // Splitting should only occur for PHIs or between terminators,
+  // because we only do local repairing.
+  assert((MI.isPHI() || MI.isTerminator()) && "Why do we split?");
+
+  assert(&MI.getOperand(RepairPt.getOpIdx()) == &MO &&
+         "Repairing placement does not match operand");
+
+  // If we need splitting for phis, that means it is because we
+  // could not find an insertion point before the terminators of
+  // the predecessor block for this argument. In other words,
+  // the input value is defined by one of the terminators.
+  assert((!MI.isPHI() || !MO.isDef()) && "Need split for phi def?");
+
+  // We split to repair the use of a phi or a terminator.
+  if (!MO.isDef()) {
+    if (MI.isTerminator()) {
+      assert(&MI != &(*MI.getParent()->getFirstTerminator()) &&
+             "Need to split for the first terminator?!");
+    } else {
+      // For the PHI case, the split may not actually be required.
+      // In the copy case, a phi is already a copy on the incoming edge,
+      // therefore there is no need to split.
+      if (ValMapping.NumBreakDowns == 1)
+        // This is already a copy, there is nothing to do.
+        RepairPt.switchTo(RepairingPlacement::RepairingKind::Reassign);
+    }
+    return;
+  }
+
+  // At this point, we need to repair a definition of a terminator.
+  // However, there are other cases where we can get away with
+  // that while still keeping the repairing local.
+  assert(MI.isTerminator() && MO.isDef() &&
+         "This code is for the def of a terminator");
+
+  // Since we use RPO traversal, if we need to repair a definition
+  // this means this definition could be:
+  // 1. Used by PHIs (i.e., this VReg has been visited as part of the
+  //    uses of a phi.), or
+  // 2. Part of a target specific instruction (i.e., the target applied
+  //    some register class constraints when creating the instruction.)
+  // If the constraints come from #2, the target said that another mapping
+  // is supported, so we may just drop them. Indeed, if we do not change
+  // the number of registers holding that value, the uses will get fixed
+  // when we get to them.
+  // Uses in PHIs may have already been processed, though.
+  // If the constraints come from #1, then those are weak constraints and
+  // no actual uses may rely on them. However, the problem remains mainly
+  // the same as for #2. If the value stays in one register, we could
+  // just switch the register bank of the definition, but we would need to
+  // account for a repairing cost for each phi we silently change.
+  //
+  // In any case, if the value needs to be broken down into several
+  // registers, the repairing is not local anymore, as we need to patch
+  // every use to rebuild the value in just one register.
+  //
+  // To summarize:
+  // - If the value is in a physical register, we can do the split and
+  //   fix locally.
+  // Otherwise, if the value is in a virtual register:
+  // - If the value remains in one register, we do not have to split;
+  //   just switching the register bank would do, but we need to account
+  //   in the repairing cost for all the phis we changed.
+  // - If the value spans several registers, then we cannot do a local
+  //   repairing.
+
+  // Check if this is a physical or virtual register.
+  Register Reg = MO.getReg();
+  if (Register::isPhysicalRegister(Reg)) {
+    // We are going to split every outgoing edge.
+    // Check that this is possible.
+    // FIXME: The machine representation is currently broken
+    // since it also allows several terminators in one basic block.
+    // Because of that, we would technically need a way to get
+    // the targets of just one terminator to know which edges
+    // we have to split.
+    // Assert that we do not hit the ill-formed representation.
+
+    // If there are other terminators before that one, some of
+    // the outgoing edges may not be dominated by this definition.
+    assert(&MI == &(*MI.getParent()->getFirstTerminator()) &&
+           "Do not know which outgoing edges are relevant");
+    const MachineInstr *Next = MI.getNextNode();
+    assert((!Next || Next->isUnconditionalBranch()) &&
+           "Do not know where each terminator ends up");
+    if (Next)
+      // If the next terminator uses Reg, this means we have
+      // to split right after MI and thus we need a way to ask
+      // which outgoing edges are affected.
+      assert(!Next->readsRegister(Reg) && "Need to split between terminators");
+    // We will split all the edges and repair there.
+  } else {
+    // This is a virtual register defined by a terminator.
+    if (ValMapping.NumBreakDowns == 1) {
+      // There is nothing to repair, but we may actually lie on
+      // the repairing cost because of the PHIs already processed,
+      // as already stated.
+      // Though the code will be correct.
+      assert(false && "Repairing cost may not be accurate");
+    } else {
+      // We need to do non-local repairing. Basically, patch all
+      // the uses (i.e., phis) that we already processed.
+ // For now, just say this mapping is not possible. + RepairPt.switchTo(RepairingPlacement::RepairingKind::Impossible); + } + } +} + +RegBankSelect::MappingCost RegBankSelect::computeMapping( + MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping, + SmallVectorImpl<RepairingPlacement> &RepairPts, + const RegBankSelect::MappingCost *BestCost) { + assert((MBFI || !BestCost) && "Costs comparison require MBFI"); + + if (!InstrMapping.isValid()) + return MappingCost::ImpossibleCost(); + + // If mapped with InstrMapping, MI will have the recorded cost. + MappingCost Cost(MBFI ? MBFI->getBlockFreq(MI.getParent()) : 1); + bool Saturated = Cost.addLocalCost(InstrMapping.getCost()); + assert(!Saturated && "Possible mapping saturated the cost"); + LLVM_DEBUG(dbgs() << "Evaluating mapping cost for: " << MI); + LLVM_DEBUG(dbgs() << "With: " << InstrMapping << '\n'); + RepairPts.clear(); + if (BestCost && Cost > *BestCost) { + LLVM_DEBUG(dbgs() << "Mapping is too expensive from the start\n"); + return Cost; + } + + // Moreover, to realize this mapping, the register bank of each operand must + // match this mapping. In other words, we may need to locally reassign the + // register banks. Account for that repairing cost as well. + // In this context, local means in the surrounding of MI. + for (unsigned OpIdx = 0, EndOpIdx = InstrMapping.getNumOperands(); + OpIdx != EndOpIdx; ++OpIdx) { + const MachineOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) + continue; + Register Reg = MO.getReg(); + if (!Reg) + continue; + LLVM_DEBUG(dbgs() << "Opd" << OpIdx << '\n'); + const RegisterBankInfo::ValueMapping &ValMapping = + InstrMapping.getOperandMapping(OpIdx); + // If Reg is already properly mapped, this is free. + bool Assign; + if (assignmentMatch(Reg, ValMapping, Assign)) { + LLVM_DEBUG(dbgs() << "=> is free (match).\n"); + continue; + } + if (Assign) { + LLVM_DEBUG(dbgs() << "=> is free (simple assignment).\n"); + RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this, + RepairingPlacement::Reassign)); + continue; + } + + // Find the insertion point for the repairing code. + RepairPts.emplace_back( + RepairingPlacement(MI, OpIdx, *TRI, *this, RepairingPlacement::Insert)); + RepairingPlacement &RepairPt = RepairPts.back(); + + // If we need to split a basic block to materialize this insertion point, + // we may give a higher cost to this mapping. + // Nevertheless, we may get away with the split, so try that first. + if (RepairPt.hasSplit()) + tryAvoidingSplit(RepairPt, MO, ValMapping); + + // Check that the materialization of the repairing is possible. + if (!RepairPt.canMaterialize()) { + LLVM_DEBUG(dbgs() << "Mapping involves impossible repairing\n"); + return MappingCost::ImpossibleCost(); + } + + // Account for the split cost and repair cost. + // Unless the cost is already saturated or we do not care about the cost. + if (!BestCost || Saturated) + continue; + + // To get accurate information we need MBFI and MBPI. + // Thus, if we end up here this information should be here. + assert(MBFI && MBPI && "Cost computation requires MBFI and MBPI"); + + // FIXME: We will have to rework the repairing cost model. + // The repairing cost depends on the register bank that MO has. + // However, when we break down the value into different values, + // MO may not have a register bank while still needing repairing. + // For the fast mode, we don't compute the cost so that is fine, + // but still for the repairing code, we will have to make a choice. 
+  // For the greedy mode, we should greedily choose the best
+  // option based on the next use of MO.
+
+  // Sum up the repairing cost of MO at each insertion point.
+  uint64_t RepairCost = getRepairCost(MO, ValMapping);
+
+  // This is an impossible-to-repair cost.
+  if (RepairCost == std::numeric_limits<unsigned>::max())
+    return MappingCost::ImpossibleCost();
+
+  // Bias used for splitting: 5%.
+  const uint64_t PercentageForBias = 5;
+  uint64_t Bias = (RepairCost * PercentageForBias + 99) / 100;
+  // We should not need more than a couple of instructions to repair
+  // an assignment. In other words, the computation should not
+  // overflow, because the repairing cost is free of basic block
+  // frequency.
+  assert(((RepairCost < RepairCost * PercentageForBias) &&
+          (RepairCost * PercentageForBias <
+           RepairCost * PercentageForBias + 99)) &&
+         "Repairing involves more than a billion instructions?!");
+  for (const std::unique_ptr<InsertPoint> &InsertPt : RepairPt) {
+    assert(InsertPt->canMaterialize() && "We should not have made it here");
+    // We will apply some basic block frequency and those use uint64_t.
+    if (!InsertPt->isSplit())
+      Saturated = Cost.addLocalCost(RepairCost);
+    else {
+      uint64_t CostForInsertPt = RepairCost;
+      // Again, we shouldn't overflow here, given that
+      // CostForInsertPt is frequency free at this point.
+      assert(CostForInsertPt + Bias > CostForInsertPt &&
+             "Repairing + split bias overflows");
+      CostForInsertPt += Bias;
+      uint64_t PtCost = InsertPt->frequency(*this) * CostForInsertPt;
+      // Check if we just overflowed.
+      if ((Saturated = PtCost < CostForInsertPt))
+        Cost.saturate();
+      else
+        Saturated = Cost.addNonLocalCost(PtCost);
+    }
+
+    // Stop looking into what it takes to repair, this is already
+    // too expensive.
+    if (BestCost && Cost > *BestCost) {
+      LLVM_DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
+      return Cost;
+    }
+
+    // No need to accumulate more cost information.
+    // We still need to gather the repairing information, though.
+    if (Saturated)
+      break;
+  }
+  LLVM_DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
+  return Cost;
+}
+
+bool RegBankSelect::applyMapping(
+    MachineInstr &MI, const RegisterBankInfo::InstructionMapping &InstrMapping,
+    SmallVectorImpl<RegBankSelect::RepairingPlacement> &RepairPts) {
+  // OpdMapper will hold all the information needed for the rewriting.
+  RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, *MRI);
+
+  // First, place the repairing code.
+  for (RepairingPlacement &RepairPt : RepairPts) {
+    if (!RepairPt.canMaterialize() ||
+        RepairPt.getKind() == RepairingPlacement::Impossible)
+      return false;
+    assert(RepairPt.getKind() != RepairingPlacement::None &&
+           "This should not make its way into the list");
+    unsigned OpIdx = RepairPt.getOpIdx();
+    MachineOperand &MO = MI.getOperand(OpIdx);
+    const RegisterBankInfo::ValueMapping &ValMapping =
+        InstrMapping.getOperandMapping(OpIdx);
+    Register Reg = MO.getReg();
+
+    switch (RepairPt.getKind()) {
+    case RepairingPlacement::Reassign:
+      assert(ValMapping.NumBreakDowns == 1 &&
+             "Reassignment should only be for simple mapping");
+      MRI->setRegBank(Reg, *ValMapping.BreakDown[0].RegBank);
+      break;
+    case RepairingPlacement::Insert:
+      OpdMapper.createVRegs(OpIdx);
+      if (!repairReg(MO, ValMapping, RepairPt, OpdMapper.getVRegs(OpIdx)))
+        return false;
+      break;
+    default:
+      llvm_unreachable("Other kind should not happen");
+    }
+  }
+
+  // Second, rewrite the instruction.
+ LLVM_DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n'); + RBI->applyMapping(OpdMapper); + + return true; +} + +bool RegBankSelect::assignInstr(MachineInstr &MI) { + LLVM_DEBUG(dbgs() << "Assign: " << MI); + // Remember the repairing placement for all the operands. + SmallVector<RepairingPlacement, 4> RepairPts; + + const RegisterBankInfo::InstructionMapping *BestMapping; + if (OptMode == RegBankSelect::Mode::Fast) { + BestMapping = &RBI->getInstrMapping(MI); + MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts); + (void)DefaultCost; + if (DefaultCost == MappingCost::ImpossibleCost()) + return false; + } else { + RegisterBankInfo::InstructionMappings PossibleMappings = + RBI->getInstrPossibleMappings(MI); + if (PossibleMappings.empty()) + return false; + BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts); + } + // Make sure the mapping is valid for MI. + assert(BestMapping->verify(MI) && "Invalid instruction mapping"); + + LLVM_DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n'); + + // After this call, MI may not be valid anymore. + // Do not use it. + return applyMapping(MI, *BestMapping, RepairPts); +} + +bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) { + // If the ISel pipeline failed, do not bother running that pass. + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + + LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n'); + const Function &F = MF.getFunction(); + Mode SaveOptMode = OptMode; + if (F.hasOptNone()) + OptMode = Mode::Fast; + init(MF); + +#ifndef NDEBUG + // Check that our input is fully legal: we require the function to have the + // Legalized property, so it should be. + // FIXME: This should be in the MachineVerifier. + if (!DisableGISelLegalityCheck) + if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) { + reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect", + "instruction is not legal", *MI); + return false; + } +#endif + + // Walk the function and assign register banks to all operands. + // Use a RPOT to make sure all registers are assigned before we choose + // the best mapping of the current instruction. + ReversePostOrderTraversal<MachineFunction*> RPOT(&MF); + for (MachineBasicBlock *MBB : RPOT) { + // Set a sensible insertion point so that subsequent calls to + // MIRBuilder. + MIRBuilder.setMBB(*MBB); + for (MachineBasicBlock::iterator MII = MBB->begin(), End = MBB->end(); + MII != End;) { + // MI might be invalidated by the assignment, so move the + // iterator before hand. + MachineInstr &MI = *MII++; + + // Ignore target-specific post-isel instructions: they should use proper + // regclasses. + if (isTargetSpecificOpcode(MI.getOpcode()) && !MI.isPreISelOpcode()) + continue; + + // Ignore inline asm instructions: they should use physical + // registers/regclasses + if (MI.isInlineAsm()) + continue; + + // Ignore debug info. + if (MI.isDebugInstr()) + continue; + + if (!assignInstr(MI)) { + reportGISelFailure(MF, *TPC, *MORE, "gisel-regbankselect", + "unable to map instruction", MI); + return false; + } + + // It's possible the mapping changed control flow, and moved the following + // instruction to a new block, so figure out the new parent. 
+      if (MII != End) {
+        MachineBasicBlock *NextInstBB = MII->getParent();
+        if (NextInstBB != MBB) {
+          LLVM_DEBUG(dbgs() << "Instruction mapping changed control flow\n");
+          MBB = NextInstBB;
+          MIRBuilder.setMBB(*MBB);
+          End = MBB->end();
+        }
+      }
+    }
+  }
+
+  OptMode = SaveOptMode;
+  return false;
+}
+
+//------------------------------------------------------------------------------
+// Helper Classes Implementation
+//------------------------------------------------------------------------------
+RegBankSelect::RepairingPlacement::RepairingPlacement(
+    MachineInstr &MI, unsigned OpIdx, const TargetRegisterInfo &TRI, Pass &P,
+    RepairingPlacement::RepairingKind Kind)
+    // By default, we are going to insert code to repair OpIdx.
+    : Kind(Kind), OpIdx(OpIdx),
+      CanMaterialize(Kind != RepairingKind::Impossible), P(P) {
+  const MachineOperand &MO = MI.getOperand(OpIdx);
+  assert(MO.isReg() && "Trying to repair a non-reg operand");
+
+  if (Kind != RepairingKind::Insert)
+    return;
+
+  // Repairings for definitions happen after MI, uses happen before.
+  bool Before = !MO.isDef();
+
+  // Check if we are done with MI.
+  if (!MI.isPHI() && !MI.isTerminator()) {
+    addInsertPoint(MI, Before);
+    // We are done with the initialization.
+    return;
+  }
+
+  // Now, look for the special cases.
+  if (MI.isPHI()) {
+    // - PHIs must be the first instructions:
+    //   * Before, we have to split the related incoming edge.
+    //   * After, move the insertion point past the last phi.
+    if (!Before) {
+      MachineBasicBlock::iterator It = MI.getParent()->getFirstNonPHI();
+      if (It != MI.getParent()->end())
+        addInsertPoint(*It, /*Before*/ true);
+      else
+        addInsertPoint(*(--It), /*Before*/ false);
+      return;
+    }
+    // We repair a use of a phi; we may need to split the related edge.
+    MachineBasicBlock &Pred = *MI.getOperand(OpIdx + 1).getMBB();
+    // Check if we can move the insertion point prior to the
+    // terminators of the predecessor.
+    Register Reg = MO.getReg();
+    MachineBasicBlock::iterator It = Pred.getLastNonDebugInstr();
+    for (auto Begin = Pred.begin(); It != Begin && It->isTerminator(); --It)
+      if (It->modifiesRegister(Reg, &TRI)) {
+        // We cannot hoist the repairing code in the predecessor.
+        // Split the edge.
+        addInsertPoint(Pred, *MI.getParent());
+        return;
+      }
+    // At this point, we can insert in Pred.
+
+    // - If It is invalid, Pred is empty and we can insert in Pred
+    //   wherever we want.
+    // - If It is valid, It is the first non-terminator, insert after It.
+    if (It == Pred.end())
+      addInsertPoint(Pred, /*Beginning*/ false);
+    else
+      addInsertPoint(*It, /*Before*/ false);
+  } else {
+    // - Terminators must be the last instructions:
+    //   * Before, move the insert point before the first terminator.
+    //   * After, we have to split the outgoing edges.
+    if (Before) {
+      // Check whether Reg is defined by any terminator.
+      MachineBasicBlock::reverse_iterator It = MI;
+      auto REnd = MI.getParent()->rend();
+
+      for (; It != REnd && It->isTerminator(); ++It) {
+        assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+               "copy insertion in middle of terminators not handled");
+      }
+
+      if (It == REnd) {
+        addInsertPoint(*MI.getParent()->begin(), true);
+        return;
+      }
+
+      // We are sure to be right before the first terminator.
+      addInsertPoint(*It, /*Before*/ false);
+      return;
+    }
+    // Make sure Reg is not redefined by other terminators, otherwise
+    // we do not know how to split.
+    for (MachineBasicBlock::iterator It = MI, End = MI.getParent()->end();
+         ++It != End;)
+      // The machine verifier should reject this kind of code.
+      assert(!It->modifiesRegister(MO.getReg(), &TRI) &&
+             "Do not know where to split");
+    // Split each outgoing edge.
+    MachineBasicBlock &Src = *MI.getParent();
+    for (auto &Succ : Src.successors())
+      addInsertPoint(Src, Succ);
+  }
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineInstr &MI,
+                                                       bool Before) {
+  addInsertPoint(*new InstrInsertPoint(MI, Before));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &MBB,
+                                                       bool Beginning) {
+  addInsertPoint(*new MBBInsertPoint(MBB, Beginning));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(MachineBasicBlock &Src,
+                                                       MachineBasicBlock &Dst) {
+  addInsertPoint(*new EdgeInsertPoint(Src, Dst, P));
+}
+
+void RegBankSelect::RepairingPlacement::addInsertPoint(
+    RegBankSelect::InsertPoint &Point) {
+  CanMaterialize &= Point.canMaterialize();
+  HasSplit |= Point.isSplit();
+  InsertPoints.emplace_back(&Point);
+}
+
+RegBankSelect::InstrInsertPoint::InstrInsertPoint(MachineInstr &Instr,
+                                                  bool Before)
+    : InsertPoint(), Instr(Instr), Before(Before) {
+  // Since we do not support splitting, we do not need to update
+  // liveness and such, so do not do anything with P.
+  assert((!Before || !Instr.isPHI()) &&
+         "Splitting before phis requires more points");
+  assert((!Before || !Instr.getNextNode() || !Instr.getNextNode()->isPHI()) &&
+         "Splitting between phis does not make sense");
+}
+
+void RegBankSelect::InstrInsertPoint::materialize() {
+  if (isSplit()) {
+    // Slice and return the beginning of the new block.
+    // If we need to split between the terminators, we theoretically
+    // need to know where the first and second set of terminators end
+    // to update the successors properly.
+    // Now, in practice, we should have a maximum of 2 branch
+    // instructions; one conditional and one unconditional. Therefore
+    // we know how to update the successor by looking at the target of
+    // the unconditional branch.
+    // If we end up splitting at some point, then we should update
+    // the liveness information and such. I.e., we would need to
+    // access P here.
+    // The machine verifier should actually make sure such cases
+    // cannot happen.
+    llvm_unreachable("Not yet implemented");
+  }
+  // Otherwise the insertion point is just the current or next
+  // instruction, depending on Before. I.e., there is nothing to do
+  // here.
+}
+
+bool RegBankSelect::InstrInsertPoint::isSplit() const {
+  // If the insertion point is after a terminator, we need to split.
+  if (!Before)
+    return Instr.isTerminator();
+  // If we insert before an instruction that is after a terminator,
+  // we are still after a terminator.
+  return Instr.getPrevNode() && Instr.getPrevNode()->isTerminator();
+}
+
+uint64_t RegBankSelect::InstrInsertPoint::frequency(const Pass &P) const {
+  // Even if we need to split, because we insert between terminators,
+  // this split actually has the same frequency as the instruction.
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  return MBFI->getBlockFreq(Instr.getParent()).getFrequency();
+}
+
+uint64_t RegBankSelect::MBBInsertPoint::frequency(const Pass &P) const {
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  return MBFI->getBlockFreq(&MBB).getFrequency();
+}
+
+void RegBankSelect::EdgeInsertPoint::materialize() {
+  // If we end up repairing twice at the same place before materializing the
+  // insertion point, we may think we have to split an edge twice.
+  // We should have a factory for the insert point such that identical points
+  // are the same instance.
+  assert(Src.isSuccessor(DstOrSplit) && DstOrSplit->isPredecessor(&Src) &&
+         "This point has already been split");
+  MachineBasicBlock *NewBB = Src.SplitCriticalEdge(DstOrSplit, P);
+  assert(NewBB && "Invalid call to materialize");
+  // We reuse the destination block to hold the information of the new block.
+  DstOrSplit = NewBB;
+}
+
+uint64_t RegBankSelect::EdgeInsertPoint::frequency(const Pass &P) const {
+  const MachineBlockFrequencyInfo *MBFI =
+      P.getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
+  if (!MBFI)
+    return 1;
+  if (WasMaterialized)
+    return MBFI->getBlockFreq(DstOrSplit).getFrequency();
+
+  const MachineBranchProbabilityInfo *MBPI =
+      P.getAnalysisIfAvailable<MachineBranchProbabilityInfo>();
+  if (!MBPI)
+    return 1;
+  // The basic block will be on the edge.
+  return (MBFI->getBlockFreq(&Src) * MBPI->getEdgeProbability(&Src, DstOrSplit))
+      .getFrequency();
+}
+
+bool RegBankSelect::EdgeInsertPoint::canMaterialize() const {
+  // If this is not a critical edge, we should not have used this insert
+  // point. Indeed, inserting in either the successor or the predecessor
+  // would have sufficed.
+  assert(Src.succ_size() > 1 && DstOrSplit->pred_size() > 1 &&
+         "Edge is not critical");
+  return Src.canSplitCriticalEdge(DstOrSplit);
+}
+
+RegBankSelect::MappingCost::MappingCost(const BlockFrequency &LocalFreq)
+    : LocalFreq(LocalFreq.getFrequency()) {}
+
+bool RegBankSelect::MappingCost::addLocalCost(uint64_t Cost) {
+  // Check if this overflows.
+  if (LocalCost + Cost < LocalCost) {
+    saturate();
+    return true;
+  }
+  LocalCost += Cost;
+  return isSaturated();
+}
+
+bool RegBankSelect::MappingCost::addNonLocalCost(uint64_t Cost) {
+  // Check if this overflows.
+  if (NonLocalCost + Cost < NonLocalCost) {
+    saturate();
+    return true;
+  }
+  NonLocalCost += Cost;
+  return isSaturated();
+}
+
+bool RegBankSelect::MappingCost::isSaturated() const {
+  return LocalCost == UINT64_MAX - 1 && NonLocalCost == UINT64_MAX &&
+         LocalFreq == UINT64_MAX;
+}
+
+void RegBankSelect::MappingCost::saturate() {
+  *this = ImpossibleCost();
+  --LocalCost;
+}
+
+RegBankSelect::MappingCost RegBankSelect::MappingCost::ImpossibleCost() {
+  return MappingCost(UINT64_MAX, UINT64_MAX, UINT64_MAX);
+}
+
+bool RegBankSelect::MappingCost::operator<(const MappingCost &Cost) const {
+  // Sort out the easy cases.
+  if (*this == Cost)
+    return false;
+  // If one is impossible to realize, the other is cheaper, unless it is
+  // impossible as well.
+  if ((*this == ImpossibleCost()) || (Cost == ImpossibleCost()))
+    return (*this == ImpossibleCost()) < (Cost == ImpossibleCost());
+  // If one is saturated, the other is cheaper, unless it is saturated
+  // as well.
+  if (isSaturated() || Cost.isSaturated())
+    return isSaturated() < Cost.isSaturated();
+  // At this point we know both costs hold sensible values.
+
+  // If the two values have different base frequencies, there is not much
+  // we can do but scale everything.
+  // However, if they have the same base frequency, we can avoid making
+  // complicated computations.
+  uint64_t ThisLocalAdjust;
+  uint64_t OtherLocalAdjust;
+  if (LLVM_LIKELY(LocalFreq == Cost.LocalFreq)) {
+
+    // At this point, we know the local costs are comparable.
+    // Handle the case that does not involve potential overflow first.
+    if (NonLocalCost == Cost.NonLocalCost)
+      // Since the non-local costs do not discriminate on the result,
+      // just compare the local costs.
+      return LocalCost < Cost.LocalCost;
+
+    // The base costs are comparable, so we may keep only the relative
+    // value to increase our chances of avoiding overflows.
+    ThisLocalAdjust = 0;
+    OtherLocalAdjust = 0;
+    if (LocalCost < Cost.LocalCost)
+      OtherLocalAdjust = Cost.LocalCost - LocalCost;
+    else
+      ThisLocalAdjust = LocalCost - Cost.LocalCost;
+  } else {
+    ThisLocalAdjust = LocalCost;
+    OtherLocalAdjust = Cost.LocalCost;
+  }
+
+  // The non-local costs are comparable, just keep the relative value.
+  uint64_t ThisNonLocalAdjust = 0;
+  uint64_t OtherNonLocalAdjust = 0;
+  if (NonLocalCost < Cost.NonLocalCost)
+    OtherNonLocalAdjust = Cost.NonLocalCost - NonLocalCost;
+  else
+    ThisNonLocalAdjust = NonLocalCost - Cost.NonLocalCost;
+  // Scale everything to make them comparable.
+  uint64_t ThisScaledCost = ThisLocalAdjust * LocalFreq;
+  // Check for overflow on that operation.
+  bool ThisOverflows = ThisLocalAdjust && (ThisScaledCost < ThisLocalAdjust ||
+                                           ThisScaledCost < LocalFreq);
+  uint64_t OtherScaledCost = OtherLocalAdjust * Cost.LocalFreq;
+  // Check for overflow on the last operation.
+  bool OtherOverflows =
+      OtherLocalAdjust &&
+      (OtherScaledCost < OtherLocalAdjust || OtherScaledCost < Cost.LocalFreq);
+  // Add the non-local costs.
+  ThisOverflows |= ThisNonLocalAdjust &&
+                   ThisScaledCost + ThisNonLocalAdjust < ThisNonLocalAdjust;
+  ThisScaledCost += ThisNonLocalAdjust;
+  OtherOverflows |= OtherNonLocalAdjust &&
+                    OtherScaledCost + OtherNonLocalAdjust < OtherNonLocalAdjust;
+  OtherScaledCost += OtherNonLocalAdjust;
+  // If both overflow, we cannot compare without additional
+  // precision, e.g., APInt. Just give up on that case.
+  if (ThisOverflows && OtherOverflows)
+    return false;
+  // If one overflows but not the other, we can still compare.
+  if (ThisOverflows || OtherOverflows)
+    return ThisOverflows < OtherOverflows;
+  // Otherwise, just compare the values.
+ return ThisScaledCost < OtherScaledCost; +} + +bool RegBankSelect::MappingCost::operator==(const MappingCost &Cost) const { + return LocalCost == Cost.LocalCost && NonLocalCost == Cost.NonLocalCost && + LocalFreq == Cost.LocalFreq; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegBankSelect::MappingCost::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +void RegBankSelect::MappingCost::print(raw_ostream &OS) const { + if (*this == ImpossibleCost()) { + OS << "impossible"; + return; + } + if (isSaturated()) { + OS << "saturated"; + return; + } + OS << LocalFreq << " * " << LocalCost << " + " << NonLocalCost; +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBank.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBank.cpp index 2d17a3b7c0..fc9c802693 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBank.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBank.cpp @@ -1,114 +1,114 @@ -//===- llvm/CodeGen/GlobalISel/RegisterBank.cpp - Register Bank --*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the RegisterBank class. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/RegisterBank.h" -#include "llvm/CodeGen/TargetRegisterInfo.h" -#include "llvm/Config/llvm-config.h" -#include "llvm/Support/Debug.h" - -#define DEBUG_TYPE "registerbank" - -using namespace llvm; - -const unsigned RegisterBank::InvalidID = UINT_MAX; - -RegisterBank::RegisterBank( - unsigned ID, const char *Name, unsigned Size, - const uint32_t *CoveredClasses, unsigned NumRegClasses) - : ID(ID), Name(Name), Size(Size) { - ContainedRegClasses.resize(NumRegClasses); - ContainedRegClasses.setBitsInMask(CoveredClasses); -} - -bool RegisterBank::verify(const TargetRegisterInfo &TRI) const { - assert(isValid() && "Invalid register bank"); - for (unsigned RCId = 0, End = TRI.getNumRegClasses(); RCId != End; ++RCId) { - const TargetRegisterClass &RC = *TRI.getRegClass(RCId); - - if (!covers(RC)) - continue; - // Verify that the register bank covers all the sub classes of the - // classes it covers. - - // Use a different (slow in that case) method than - // RegisterBankInfo to find the subclasses of RC, to make sure - // both agree on the covers. - for (unsigned SubRCId = 0; SubRCId != End; ++SubRCId) { - const TargetRegisterClass &SubRC = *TRI.getRegClass(RCId); - - if (!RC.hasSubClassEq(&SubRC)) - continue; - - // Verify that the Size of the register bank is big enough to cover - // all the register classes it covers. - assert(getSize() >= TRI.getRegSizeInBits(SubRC) && - "Size is not big enough for all the subclasses!"); - assert(covers(SubRC) && "Not all subclasses are covered"); - } - } - return true; -} - -bool RegisterBank::covers(const TargetRegisterClass &RC) const { - assert(isValid() && "RB hasn't been initialized yet"); - return ContainedRegClasses.test(RC.getID()); -} - -bool RegisterBank::isValid() const { - return ID != InvalidID && Name != nullptr && Size != 0 && - // A register bank that does not cover anything is useless. 
- !ContainedRegClasses.empty(); -} - -bool RegisterBank::operator==(const RegisterBank &OtherRB) const { - // There must be only one instance of a given register bank alive - // for the whole compilation. - // The RegisterBankInfo is supposed to enforce that. - assert((OtherRB.getID() != getID() || &OtherRB == this) && - "ID does not uniquely identify a RegisterBank"); - return &OtherRB == this; -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegisterBank::dump(const TargetRegisterInfo *TRI) const { - print(dbgs(), /* IsForDebug */ true, TRI); -} -#endif - -void RegisterBank::print(raw_ostream &OS, bool IsForDebug, - const TargetRegisterInfo *TRI) const { - OS << getName(); - if (!IsForDebug) - return; - OS << "(ID:" << getID() << ", Size:" << getSize() << ")\n" - << "isValid:" << isValid() << '\n' - << "Number of Covered register classes: " << ContainedRegClasses.count() - << '\n'; - // Print all the subclasses if we can. - // This register classes may not be properly initialized yet. - if (!TRI || ContainedRegClasses.empty()) - return; - assert(ContainedRegClasses.size() == TRI->getNumRegClasses() && - "TRI does not match the initialization process?"); - bool IsFirst = true; - OS << "Covered register classes:\n"; - for (unsigned RCId = 0, End = TRI->getNumRegClasses(); RCId != End; ++RCId) { - const TargetRegisterClass &RC = *TRI->getRegClass(RCId); - - if (!covers(RC)) - continue; - - if (!IsFirst) - OS << ", "; - OS << TRI->getRegClassName(&RC); - IsFirst = false; - } -} +//===- llvm/CodeGen/GlobalISel/RegisterBank.cpp - Register Bank --*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the RegisterBank class. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/RegisterBank.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/Support/Debug.h" + +#define DEBUG_TYPE "registerbank" + +using namespace llvm; + +const unsigned RegisterBank::InvalidID = UINT_MAX; + +RegisterBank::RegisterBank( + unsigned ID, const char *Name, unsigned Size, + const uint32_t *CoveredClasses, unsigned NumRegClasses) + : ID(ID), Name(Name), Size(Size) { + ContainedRegClasses.resize(NumRegClasses); + ContainedRegClasses.setBitsInMask(CoveredClasses); +} + +bool RegisterBank::verify(const TargetRegisterInfo &TRI) const { + assert(isValid() && "Invalid register bank"); + for (unsigned RCId = 0, End = TRI.getNumRegClasses(); RCId != End; ++RCId) { + const TargetRegisterClass &RC = *TRI.getRegClass(RCId); + + if (!covers(RC)) + continue; + // Verify that the register bank covers all the sub classes of the + // classes it covers. + + // Use a different (slow in that case) method than + // RegisterBankInfo to find the subclasses of RC, to make sure + // both agree on the covers. + for (unsigned SubRCId = 0; SubRCId != End; ++SubRCId) { + const TargetRegisterClass &SubRC = *TRI.getRegClass(RCId); + + if (!RC.hasSubClassEq(&SubRC)) + continue; + + // Verify that the Size of the register bank is big enough to cover + // all the register classes it covers. 
+ assert(getSize() >= TRI.getRegSizeInBits(SubRC) && + "Size is not big enough for all the subclasses!"); + assert(covers(SubRC) && "Not all subclasses are covered"); + } + } + return true; +} + +bool RegisterBank::covers(const TargetRegisterClass &RC) const { + assert(isValid() && "RB hasn't been initialized yet"); + return ContainedRegClasses.test(RC.getID()); +} + +bool RegisterBank::isValid() const { + return ID != InvalidID && Name != nullptr && Size != 0 && + // A register bank that does not cover anything is useless. + !ContainedRegClasses.empty(); +} + +bool RegisterBank::operator==(const RegisterBank &OtherRB) const { + // There must be only one instance of a given register bank alive + // for the whole compilation. + // The RegisterBankInfo is supposed to enforce that. + assert((OtherRB.getID() != getID() || &OtherRB == this) && + "ID does not uniquely identify a RegisterBank"); + return &OtherRB == this; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBank::dump(const TargetRegisterInfo *TRI) const { + print(dbgs(), /* IsForDebug */ true, TRI); +} +#endif + +void RegisterBank::print(raw_ostream &OS, bool IsForDebug, + const TargetRegisterInfo *TRI) const { + OS << getName(); + if (!IsForDebug) + return; + OS << "(ID:" << getID() << ", Size:" << getSize() << ")\n" + << "isValid:" << isValid() << '\n' + << "Number of Covered register classes: " << ContainedRegClasses.count() + << '\n'; + // Print all the subclasses if we can. + // This register classes may not be properly initialized yet. + if (!TRI || ContainedRegClasses.empty()) + return; + assert(ContainedRegClasses.size() == TRI->getNumRegClasses() && + "TRI does not match the initialization process?"); + bool IsFirst = true; + OS << "Covered register classes:\n"; + for (unsigned RCId = 0, End = TRI->getNumRegClasses(); RCId != End; ++RCId) { + const TargetRegisterClass &RC = *TRI->getRegClass(RCId); + + if (!covers(RC)) + continue; + + if (!IsFirst) + OS << ", "; + OS << TRI->getRegClassName(&RC); + IsFirst = false; + } +} diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp index 32905d0369..e2a9637471 100644 --- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp +++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp @@ -1,805 +1,805 @@ -//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.cpp --------------*- C++ -*-==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// \file -/// This file implements the RegisterBankInfo class. 
-//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h" -#include "llvm/ADT/SmallString.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/ADT/iterator_range.h" -#include "llvm/CodeGen/GlobalISel/RegisterBank.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetOpcodes.h" -#include "llvm/CodeGen/TargetRegisterInfo.h" -#include "llvm/CodeGen/TargetSubtargetInfo.h" -#include "llvm/Config/llvm-config.h" -#include "llvm/IR/Type.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" - -#include <algorithm> // For std::max. - -#define DEBUG_TYPE "registerbankinfo" - -using namespace llvm; - -STATISTIC(NumPartialMappingsCreated, - "Number of partial mappings dynamically created"); -STATISTIC(NumPartialMappingsAccessed, - "Number of partial mappings dynamically accessed"); -STATISTIC(NumValueMappingsCreated, - "Number of value mappings dynamically created"); -STATISTIC(NumValueMappingsAccessed, - "Number of value mappings dynamically accessed"); -STATISTIC(NumOperandsMappingsCreated, - "Number of operands mappings dynamically created"); -STATISTIC(NumOperandsMappingsAccessed, - "Number of operands mappings dynamically accessed"); -STATISTIC(NumInstructionMappingsCreated, - "Number of instruction mappings dynamically created"); -STATISTIC(NumInstructionMappingsAccessed, - "Number of instruction mappings dynamically accessed"); - -const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX; -const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1; - -//------------------------------------------------------------------------------ -// RegisterBankInfo implementation. -//------------------------------------------------------------------------------ -RegisterBankInfo::RegisterBankInfo(RegisterBank **RegBanks, - unsigned NumRegBanks) - : RegBanks(RegBanks), NumRegBanks(NumRegBanks) { -#ifndef NDEBUG - for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { - assert(RegBanks[Idx] != nullptr && "Invalid RegisterBank"); - assert(RegBanks[Idx]->isValid() && "RegisterBank should be valid"); - } -#endif // NDEBUG -} - -bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const { -#ifndef NDEBUG - for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { - const RegisterBank &RegBank = getRegBank(Idx); - assert(Idx == RegBank.getID() && - "ID does not match the index in the array"); - LLVM_DEBUG(dbgs() << "Verify " << RegBank << '\n'); - assert(RegBank.verify(TRI) && "RegBank is invalid"); - } -#endif // NDEBUG - return true; -} - -const RegisterBank * -RegisterBankInfo::getRegBank(Register Reg, const MachineRegisterInfo &MRI, - const TargetRegisterInfo &TRI) const { - if (Register::isPhysicalRegister(Reg)) { - // FIXME: This was probably a copy to a virtual register that does have a - // type we could use. 
- return &getRegBankFromRegClass(getMinimalPhysRegClass(Reg, TRI), LLT()); - } - - assert(Reg && "NoRegister does not have a register bank"); - const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); - if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) - return RB; - if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) - return &getRegBankFromRegClass(*RC, MRI.getType(Reg)); - return nullptr; -} - -const TargetRegisterClass & -RegisterBankInfo::getMinimalPhysRegClass(Register Reg, - const TargetRegisterInfo &TRI) const { - assert(Register::isPhysicalRegister(Reg) && "Reg must be a physreg"); - const auto &RegRCIt = PhysRegMinimalRCs.find(Reg); - if (RegRCIt != PhysRegMinimalRCs.end()) - return *RegRCIt->second; - const TargetRegisterClass *PhysRC = TRI.getMinimalPhysRegClass(Reg); - PhysRegMinimalRCs[Reg] = PhysRC; - return *PhysRC; -} - -const RegisterBank *RegisterBankInfo::getRegBankFromConstraints( - const MachineInstr &MI, unsigned OpIdx, const TargetInstrInfo &TII, - const MachineRegisterInfo &MRI) const { - const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); - - // The mapping of the registers may be available via the - // register class constraints. - const TargetRegisterClass *RC = MI.getRegClassConstraint(OpIdx, &TII, TRI); - - if (!RC) - return nullptr; - - Register Reg = MI.getOperand(OpIdx).getReg(); - const RegisterBank &RegBank = getRegBankFromRegClass(*RC, MRI.getType(Reg)); - // Sanity check that the target properly implemented getRegBankFromRegClass. - assert(RegBank.covers(*RC) && - "The mapping of the register bank does not make sense"); - return &RegBank; -} - -const TargetRegisterClass *RegisterBankInfo::constrainGenericRegister( - Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI) { - - // If the register already has a class, fallback to MRI::constrainRegClass. - auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); - if (RegClassOrBank.is<const TargetRegisterClass *>()) - return MRI.constrainRegClass(Reg, &RC); - - const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); - // Otherwise, all we can do is ensure the bank covers the class, and set it. - if (RB && !RB->covers(RC)) - return nullptr; - - // If nothing was set or the class is simply compatible, set it. - MRI.setRegClass(Reg, &RC); - return &RC; -} - -/// Check whether or not \p MI should be treated like a copy -/// for the mappings. -/// Copy like instruction are special for mapping because -/// they don't have actual register constraints. Moreover, -/// they sometimes have register classes assigned and we can -/// just use that instead of failing to provide a generic mapping. -static bool isCopyLike(const MachineInstr &MI) { - return MI.isCopy() || MI.isPHI() || - MI.getOpcode() == TargetOpcode::REG_SEQUENCE; -} - -const RegisterBankInfo::InstructionMapping & -RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const { - // For copies we want to walk over the operands and try to find one - // that has a register bank since the instruction itself will not get - // us any constraint. - bool IsCopyLike = isCopyLike(MI); - // For copy like instruction, only the mapping of the definition - // is important. The rest is not constrained. - unsigned NumOperandsForMapping = IsCopyLike ? 
1 : MI.getNumOperands(); - - const MachineFunction &MF = *MI.getMF(); - const TargetSubtargetInfo &STI = MF.getSubtarget(); - const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); - const MachineRegisterInfo &MRI = MF.getRegInfo(); - // We may need to query the instruction encoding to guess the mapping. - const TargetInstrInfo &TII = *STI.getInstrInfo(); - - // Before doing anything complicated check if the mapping is not - // directly available. - bool CompleteMapping = true; - - SmallVector<const ValueMapping *, 8> OperandsMapping(NumOperandsForMapping); - for (unsigned OpIdx = 0, EndIdx = MI.getNumOperands(); OpIdx != EndIdx; - ++OpIdx) { - const MachineOperand &MO = MI.getOperand(OpIdx); - if (!MO.isReg()) - continue; - Register Reg = MO.getReg(); - if (!Reg) - continue; - // The register bank of Reg is just a side effect of the current - // excution and in particular, there is no reason to believe this - // is the best default mapping for the current instruction. Keep - // it as an alternative register bank if we cannot figure out - // something. - const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI); - // For copy-like instruction, we want to reuse the register bank - // that is already set on Reg, if any, since those instructions do - // not have any constraints. - const RegisterBank *CurRegBank = IsCopyLike ? AltRegBank : nullptr; - if (!CurRegBank) { - // If this is a target specific instruction, we can deduce - // the register bank from the encoding constraints. - CurRegBank = getRegBankFromConstraints(MI, OpIdx, TII, MRI); - if (!CurRegBank) { - // All our attempts failed, give up. - CompleteMapping = false; - - if (!IsCopyLike) - // MI does not carry enough information to guess the mapping. - return getInvalidInstructionMapping(); - continue; - } - } - - unsigned Size = getSizeInBits(Reg, MRI, TRI); - const ValueMapping *ValMapping = &getValueMapping(0, Size, *CurRegBank); - if (IsCopyLike) { - if (!OperandsMapping[0]) { - if (MI.isRegSequence()) { - // For reg_sequence, the result size does not match the input. - unsigned ResultSize = getSizeInBits(MI.getOperand(0).getReg(), - MRI, TRI); - OperandsMapping[0] = &getValueMapping(0, ResultSize, *CurRegBank); - } else { - OperandsMapping[0] = ValMapping; - } - } - - // The default handling assumes any register bank can be copied to any - // other. If this isn't the case, the target should specially deal with - // reg_sequence/phi. There may also be unsatisfiable copies. - for (; OpIdx != EndIdx; ++OpIdx) { - const MachineOperand &MO = MI.getOperand(OpIdx); - if (!MO.isReg()) - continue; - Register Reg = MO.getReg(); - if (!Reg) - continue; - - const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI); - if (AltRegBank && - cannotCopy(*CurRegBank, *AltRegBank, getSizeInBits(Reg, MRI, TRI))) - return getInvalidInstructionMapping(); - } - - CompleteMapping = true; - break; - } - - OperandsMapping[OpIdx] = ValMapping; - } - - if (IsCopyLike && !CompleteMapping) { - // No way to deduce the type from what we have. - return getInvalidInstructionMapping(); - } - - assert(CompleteMapping && "Setting an uncomplete mapping"); - return getInstructionMapping( - DefaultMappingID, /*Cost*/ 1, - /*OperandsMapping*/ getOperandsMapping(OperandsMapping), - NumOperandsForMapping); -} - -/// Hashing function for PartialMapping. -static hash_code hashPartialMapping(unsigned StartIdx, unsigned Length, - const RegisterBank *RegBank) { - return hash_combine(StartIdx, Length, RegBank ? 
RegBank->getID() : 0); -} - -/// Overloaded version of hash_value for a PartialMapping. -hash_code -llvm::hash_value(const RegisterBankInfo::PartialMapping &PartMapping) { - return hashPartialMapping(PartMapping.StartIdx, PartMapping.Length, - PartMapping.RegBank); -} - -const RegisterBankInfo::PartialMapping & -RegisterBankInfo::getPartialMapping(unsigned StartIdx, unsigned Length, - const RegisterBank &RegBank) const { - ++NumPartialMappingsAccessed; - - hash_code Hash = hashPartialMapping(StartIdx, Length, &RegBank); - const auto &It = MapOfPartialMappings.find(Hash); - if (It != MapOfPartialMappings.end()) - return *It->second; - - ++NumPartialMappingsCreated; - - auto &PartMapping = MapOfPartialMappings[Hash]; - PartMapping = std::make_unique<PartialMapping>(StartIdx, Length, RegBank); - return *PartMapping; -} - -const RegisterBankInfo::ValueMapping & -RegisterBankInfo::getValueMapping(unsigned StartIdx, unsigned Length, - const RegisterBank &RegBank) const { - return getValueMapping(&getPartialMapping(StartIdx, Length, RegBank), 1); -} - -static hash_code -hashValueMapping(const RegisterBankInfo::PartialMapping *BreakDown, - unsigned NumBreakDowns) { - if (LLVM_LIKELY(NumBreakDowns == 1)) - return hash_value(*BreakDown); - SmallVector<size_t, 8> Hashes(NumBreakDowns); - for (unsigned Idx = 0; Idx != NumBreakDowns; ++Idx) - Hashes.push_back(hash_value(BreakDown[Idx])); - return hash_combine_range(Hashes.begin(), Hashes.end()); -} - -const RegisterBankInfo::ValueMapping & -RegisterBankInfo::getValueMapping(const PartialMapping *BreakDown, - unsigned NumBreakDowns) const { - ++NumValueMappingsAccessed; - - hash_code Hash = hashValueMapping(BreakDown, NumBreakDowns); - const auto &It = MapOfValueMappings.find(Hash); - if (It != MapOfValueMappings.end()) - return *It->second; - - ++NumValueMappingsCreated; - - auto &ValMapping = MapOfValueMappings[Hash]; - ValMapping = std::make_unique<ValueMapping>(BreakDown, NumBreakDowns); - return *ValMapping; -} - -template <typename Iterator> -const RegisterBankInfo::ValueMapping * -RegisterBankInfo::getOperandsMapping(Iterator Begin, Iterator End) const { - - ++NumOperandsMappingsAccessed; - - // The addresses of the value mapping are unique. - // Therefore, we can use them directly to hash the operand mapping. - hash_code Hash = hash_combine_range(Begin, End); - auto &Res = MapOfOperandsMappings[Hash]; - if (Res) - return Res.get(); - - ++NumOperandsMappingsCreated; - - // Create the array of ValueMapping. - // Note: this array will not hash to this instance of operands - // mapping, because we use the pointer of the ValueMapping - // to hash and we expect them to uniquely identify an instance - // of value mapping. 
- Res = std::make_unique<ValueMapping[]>(std::distance(Begin, End)); - unsigned Idx = 0; - for (Iterator It = Begin; It != End; ++It, ++Idx) { - const ValueMapping *ValMap = *It; - if (!ValMap) - continue; - Res[Idx] = *ValMap; - } - return Res.get(); -} - -const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( - const SmallVectorImpl<const RegisterBankInfo::ValueMapping *> &OpdsMapping) - const { - return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); -} - -const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( - std::initializer_list<const RegisterBankInfo::ValueMapping *> OpdsMapping) - const { - return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); -} - -static hash_code -hashInstructionMapping(unsigned ID, unsigned Cost, - const RegisterBankInfo::ValueMapping *OperandsMapping, - unsigned NumOperands) { - return hash_combine(ID, Cost, OperandsMapping, NumOperands); -} - -const RegisterBankInfo::InstructionMapping & -RegisterBankInfo::getInstructionMappingImpl( - bool IsInvalid, unsigned ID, unsigned Cost, - const RegisterBankInfo::ValueMapping *OperandsMapping, - unsigned NumOperands) const { - assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 && - OperandsMapping == nullptr && NumOperands == 0) || - !IsInvalid) && - "Mismatch argument for invalid input"); - ++NumInstructionMappingsAccessed; - - hash_code Hash = - hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands); - const auto &It = MapOfInstructionMappings.find(Hash); - if (It != MapOfInstructionMappings.end()) - return *It->second; - - ++NumInstructionMappingsCreated; - - auto &InstrMapping = MapOfInstructionMappings[Hash]; - InstrMapping = std::make_unique<InstructionMapping>( - ID, Cost, OperandsMapping, NumOperands); - return *InstrMapping; -} - -const RegisterBankInfo::InstructionMapping & -RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { - const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI); - if (Mapping.isValid()) - return Mapping; - llvm_unreachable("The target must implement this"); -} - -RegisterBankInfo::InstructionMappings -RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const { - InstructionMappings PossibleMappings; - const auto &Mapping = getInstrMapping(MI); - if (Mapping.isValid()) { - // Put the default mapping first. - PossibleMappings.push_back(&Mapping); - } - - // Then the alternative mapping, if any. - InstructionMappings AltMappings = getInstrAlternativeMappings(MI); +//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.cpp --------------*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file implements the RegisterBankInfo class. 
+//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/iterator_range.h" +#include "llvm/CodeGen/GlobalISel/RegisterBank.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetOpcodes.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#include <algorithm> // For std::max. + +#define DEBUG_TYPE "registerbankinfo" + +using namespace llvm; + +STATISTIC(NumPartialMappingsCreated, + "Number of partial mappings dynamically created"); +STATISTIC(NumPartialMappingsAccessed, + "Number of partial mappings dynamically accessed"); +STATISTIC(NumValueMappingsCreated, + "Number of value mappings dynamically created"); +STATISTIC(NumValueMappingsAccessed, + "Number of value mappings dynamically accessed"); +STATISTIC(NumOperandsMappingsCreated, + "Number of operands mappings dynamically created"); +STATISTIC(NumOperandsMappingsAccessed, + "Number of operands mappings dynamically accessed"); +STATISTIC(NumInstructionMappingsCreated, + "Number of instruction mappings dynamically created"); +STATISTIC(NumInstructionMappingsAccessed, + "Number of instruction mappings dynamically accessed"); + +const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX; +const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1; + +//------------------------------------------------------------------------------ +// RegisterBankInfo implementation. +//------------------------------------------------------------------------------ +RegisterBankInfo::RegisterBankInfo(RegisterBank **RegBanks, + unsigned NumRegBanks) + : RegBanks(RegBanks), NumRegBanks(NumRegBanks) { +#ifndef NDEBUG + for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { + assert(RegBanks[Idx] != nullptr && "Invalid RegisterBank"); + assert(RegBanks[Idx]->isValid() && "RegisterBank should be valid"); + } +#endif // NDEBUG +} + +bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const { +#ifndef NDEBUG + for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) { + const RegisterBank &RegBank = getRegBank(Idx); + assert(Idx == RegBank.getID() && + "ID does not match the index in the array"); + LLVM_DEBUG(dbgs() << "Verify " << RegBank << '\n'); + assert(RegBank.verify(TRI) && "RegBank is invalid"); + } +#endif // NDEBUG + return true; +} + +const RegisterBank * +RegisterBankInfo::getRegBank(Register Reg, const MachineRegisterInfo &MRI, + const TargetRegisterInfo &TRI) const { + if (Register::isPhysicalRegister(Reg)) { + // FIXME: This was probably a copy to a virtual register that does have a + // type we could use. 
+ return &getRegBankFromRegClass(getMinimalPhysRegClass(Reg, TRI), LLT()); + } + + assert(Reg && "NoRegister does not have a register bank"); + const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); + if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) + return RB; + if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) + return &getRegBankFromRegClass(*RC, MRI.getType(Reg)); + return nullptr; +} + +const TargetRegisterClass & +RegisterBankInfo::getMinimalPhysRegClass(Register Reg, + const TargetRegisterInfo &TRI) const { + assert(Register::isPhysicalRegister(Reg) && "Reg must be a physreg"); + const auto &RegRCIt = PhysRegMinimalRCs.find(Reg); + if (RegRCIt != PhysRegMinimalRCs.end()) + return *RegRCIt->second; + const TargetRegisterClass *PhysRC = TRI.getMinimalPhysRegClass(Reg); + PhysRegMinimalRCs[Reg] = PhysRC; + return *PhysRC; +} + +const RegisterBank *RegisterBankInfo::getRegBankFromConstraints( + const MachineInstr &MI, unsigned OpIdx, const TargetInstrInfo &TII, + const MachineRegisterInfo &MRI) const { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + + // The mapping of the registers may be available via the + // register class constraints. + const TargetRegisterClass *RC = MI.getRegClassConstraint(OpIdx, &TII, TRI); + + if (!RC) + return nullptr; + + Register Reg = MI.getOperand(OpIdx).getReg(); + const RegisterBank &RegBank = getRegBankFromRegClass(*RC, MRI.getType(Reg)); + // Sanity check that the target properly implemented getRegBankFromRegClass. + assert(RegBank.covers(*RC) && + "The mapping of the register bank does not make sense"); + return &RegBank; +} + +const TargetRegisterClass *RegisterBankInfo::constrainGenericRegister( + Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI) { + + // If the register already has a class, fallback to MRI::constrainRegClass. + auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); + if (RegClassOrBank.is<const TargetRegisterClass *>()) + return MRI.constrainRegClass(Reg, &RC); + + const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); + // Otherwise, all we can do is ensure the bank covers the class, and set it. + if (RB && !RB->covers(RC)) + return nullptr; + + // If nothing was set or the class is simply compatible, set it. + MRI.setRegClass(Reg, &RC); + return &RC; +} + +/// Check whether or not \p MI should be treated like a copy +/// for the mappings. +/// Copy like instruction are special for mapping because +/// they don't have actual register constraints. Moreover, +/// they sometimes have register classes assigned and we can +/// just use that instead of failing to provide a generic mapping. +static bool isCopyLike(const MachineInstr &MI) { + return MI.isCopy() || MI.isPHI() || + MI.getOpcode() == TargetOpcode::REG_SEQUENCE; +} + +const RegisterBankInfo::InstructionMapping & +RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const { + // For copies we want to walk over the operands and try to find one + // that has a register bank since the instruction itself will not get + // us any constraint. + bool IsCopyLike = isCopyLike(MI); + // For copy like instruction, only the mapping of the definition + // is important. The rest is not constrained. + unsigned NumOperandsForMapping = IsCopyLike ? 
1 : MI.getNumOperands(); + + const MachineFunction &MF = *MI.getMF(); + const TargetSubtargetInfo &STI = MF.getSubtarget(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + // We may need to query the instruction encoding to guess the mapping. + const TargetInstrInfo &TII = *STI.getInstrInfo(); + + // Before doing anything complicated check if the mapping is not + // directly available. + bool CompleteMapping = true; + + SmallVector<const ValueMapping *, 8> OperandsMapping(NumOperandsForMapping); + for (unsigned OpIdx = 0, EndIdx = MI.getNumOperands(); OpIdx != EndIdx; + ++OpIdx) { + const MachineOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) + continue; + Register Reg = MO.getReg(); + if (!Reg) + continue; + // The register bank of Reg is just a side effect of the current + // excution and in particular, there is no reason to believe this + // is the best default mapping for the current instruction. Keep + // it as an alternative register bank if we cannot figure out + // something. + const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI); + // For copy-like instruction, we want to reuse the register bank + // that is already set on Reg, if any, since those instructions do + // not have any constraints. + const RegisterBank *CurRegBank = IsCopyLike ? AltRegBank : nullptr; + if (!CurRegBank) { + // If this is a target specific instruction, we can deduce + // the register bank from the encoding constraints. + CurRegBank = getRegBankFromConstraints(MI, OpIdx, TII, MRI); + if (!CurRegBank) { + // All our attempts failed, give up. + CompleteMapping = false; + + if (!IsCopyLike) + // MI does not carry enough information to guess the mapping. + return getInvalidInstructionMapping(); + continue; + } + } + + unsigned Size = getSizeInBits(Reg, MRI, TRI); + const ValueMapping *ValMapping = &getValueMapping(0, Size, *CurRegBank); + if (IsCopyLike) { + if (!OperandsMapping[0]) { + if (MI.isRegSequence()) { + // For reg_sequence, the result size does not match the input. + unsigned ResultSize = getSizeInBits(MI.getOperand(0).getReg(), + MRI, TRI); + OperandsMapping[0] = &getValueMapping(0, ResultSize, *CurRegBank); + } else { + OperandsMapping[0] = ValMapping; + } + } + + // The default handling assumes any register bank can be copied to any + // other. If this isn't the case, the target should specially deal with + // reg_sequence/phi. There may also be unsatisfiable copies. + for (; OpIdx != EndIdx; ++OpIdx) { + const MachineOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) + continue; + Register Reg = MO.getReg(); + if (!Reg) + continue; + + const RegisterBank *AltRegBank = getRegBank(Reg, MRI, TRI); + if (AltRegBank && + cannotCopy(*CurRegBank, *AltRegBank, getSizeInBits(Reg, MRI, TRI))) + return getInvalidInstructionMapping(); + } + + CompleteMapping = true; + break; + } + + OperandsMapping[OpIdx] = ValMapping; + } + + if (IsCopyLike && !CompleteMapping) { + // No way to deduce the type from what we have. + return getInvalidInstructionMapping(); + } + + assert(CompleteMapping && "Setting an uncomplete mapping"); + return getInstructionMapping( + DefaultMappingID, /*Cost*/ 1, + /*OperandsMapping*/ getOperandsMapping(OperandsMapping), + NumOperandsForMapping); +} + +/// Hashing function for PartialMapping. +static hash_code hashPartialMapping(unsigned StartIdx, unsigned Length, + const RegisterBank *RegBank) { + return hash_combine(StartIdx, Length, RegBank ? 
RegBank->getID() : 0); +} + +/// Overloaded version of hash_value for a PartialMapping. +hash_code +llvm::hash_value(const RegisterBankInfo::PartialMapping &PartMapping) { + return hashPartialMapping(PartMapping.StartIdx, PartMapping.Length, + PartMapping.RegBank); +} + +const RegisterBankInfo::PartialMapping & +RegisterBankInfo::getPartialMapping(unsigned StartIdx, unsigned Length, + const RegisterBank &RegBank) const { + ++NumPartialMappingsAccessed; + + hash_code Hash = hashPartialMapping(StartIdx, Length, &RegBank); + const auto &It = MapOfPartialMappings.find(Hash); + if (It != MapOfPartialMappings.end()) + return *It->second; + + ++NumPartialMappingsCreated; + + auto &PartMapping = MapOfPartialMappings[Hash]; + PartMapping = std::make_unique<PartialMapping>(StartIdx, Length, RegBank); + return *PartMapping; +} + +const RegisterBankInfo::ValueMapping & +RegisterBankInfo::getValueMapping(unsigned StartIdx, unsigned Length, + const RegisterBank &RegBank) const { + return getValueMapping(&getPartialMapping(StartIdx, Length, RegBank), 1); +} + +static hash_code +hashValueMapping(const RegisterBankInfo::PartialMapping *BreakDown, + unsigned NumBreakDowns) { + if (LLVM_LIKELY(NumBreakDowns == 1)) + return hash_value(*BreakDown); + SmallVector<size_t, 8> Hashes(NumBreakDowns); + for (unsigned Idx = 0; Idx != NumBreakDowns; ++Idx) + Hashes.push_back(hash_value(BreakDown[Idx])); + return hash_combine_range(Hashes.begin(), Hashes.end()); +} + +const RegisterBankInfo::ValueMapping & +RegisterBankInfo::getValueMapping(const PartialMapping *BreakDown, + unsigned NumBreakDowns) const { + ++NumValueMappingsAccessed; + + hash_code Hash = hashValueMapping(BreakDown, NumBreakDowns); + const auto &It = MapOfValueMappings.find(Hash); + if (It != MapOfValueMappings.end()) + return *It->second; + + ++NumValueMappingsCreated; + + auto &ValMapping = MapOfValueMappings[Hash]; + ValMapping = std::make_unique<ValueMapping>(BreakDown, NumBreakDowns); + return *ValMapping; +} + +template <typename Iterator> +const RegisterBankInfo::ValueMapping * +RegisterBankInfo::getOperandsMapping(Iterator Begin, Iterator End) const { + + ++NumOperandsMappingsAccessed; + + // The addresses of the value mapping are unique. + // Therefore, we can use them directly to hash the operand mapping. + hash_code Hash = hash_combine_range(Begin, End); + auto &Res = MapOfOperandsMappings[Hash]; + if (Res) + return Res.get(); + + ++NumOperandsMappingsCreated; + + // Create the array of ValueMapping. + // Note: this array will not hash to this instance of operands + // mapping, because we use the pointer of the ValueMapping + // to hash and we expect them to uniquely identify an instance + // of value mapping. 
+ Res = std::make_unique<ValueMapping[]>(std::distance(Begin, End)); + unsigned Idx = 0; + for (Iterator It = Begin; It != End; ++It, ++Idx) { + const ValueMapping *ValMap = *It; + if (!ValMap) + continue; + Res[Idx] = *ValMap; + } + return Res.get(); +} + +const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( + const SmallVectorImpl<const RegisterBankInfo::ValueMapping *> &OpdsMapping) + const { + return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); +} + +const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping( + std::initializer_list<const RegisterBankInfo::ValueMapping *> OpdsMapping) + const { + return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end()); +} + +static hash_code +hashInstructionMapping(unsigned ID, unsigned Cost, + const RegisterBankInfo::ValueMapping *OperandsMapping, + unsigned NumOperands) { + return hash_combine(ID, Cost, OperandsMapping, NumOperands); +} + +const RegisterBankInfo::InstructionMapping & +RegisterBankInfo::getInstructionMappingImpl( + bool IsInvalid, unsigned ID, unsigned Cost, + const RegisterBankInfo::ValueMapping *OperandsMapping, + unsigned NumOperands) const { + assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 && + OperandsMapping == nullptr && NumOperands == 0) || + !IsInvalid) && + "Mismatch argument for invalid input"); + ++NumInstructionMappingsAccessed; + + hash_code Hash = + hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands); + const auto &It = MapOfInstructionMappings.find(Hash); + if (It != MapOfInstructionMappings.end()) + return *It->second; + + ++NumInstructionMappingsCreated; + + auto &InstrMapping = MapOfInstructionMappings[Hash]; + InstrMapping = std::make_unique<InstructionMapping>( + ID, Cost, OperandsMapping, NumOperands); + return *InstrMapping; +} + +const RegisterBankInfo::InstructionMapping & +RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { + const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI); + if (Mapping.isValid()) + return Mapping; + llvm_unreachable("The target must implement this"); +} + +RegisterBankInfo::InstructionMappings +RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const { + InstructionMappings PossibleMappings; + const auto &Mapping = getInstrMapping(MI); + if (Mapping.isValid()) { + // Put the default mapping first. + PossibleMappings.push_back(&Mapping); + } + + // Then the alternative mapping, if any. + InstructionMappings AltMappings = getInstrAlternativeMappings(MI); append_range(PossibleMappings, AltMappings); -#ifndef NDEBUG - for (const InstructionMapping *Mapping : PossibleMappings) - assert(Mapping->verify(MI) && "Mapping is invalid"); -#endif - return PossibleMappings; -} - -RegisterBankInfo::InstructionMappings -RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const { - // No alternative for MI. 
- return InstructionMappings(); -} - -void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) { - MachineInstr &MI = OpdMapper.getMI(); - MachineRegisterInfo &MRI = OpdMapper.getMRI(); - LLVM_DEBUG(dbgs() << "Applying default-like mapping\n"); - for (unsigned OpIdx = 0, - EndIdx = OpdMapper.getInstrMapping().getNumOperands(); - OpIdx != EndIdx; ++OpIdx) { - LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx); - MachineOperand &MO = MI.getOperand(OpIdx); - if (!MO.isReg()) { - LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n"); - continue; - } - if (!MO.getReg()) { - LLVM_DEBUG(dbgs() << " is $noreg, nothing to be done\n"); - continue; - } - assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns != - 0 && - "Invalid mapping"); - assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns == - 1 && - "This mapping is too complex for this function"); - iterator_range<SmallVectorImpl<Register>::const_iterator> NewRegs = - OpdMapper.getVRegs(OpIdx); - if (NewRegs.empty()) { - LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n"); - continue; - } - Register OrigReg = MO.getReg(); - Register NewReg = *NewRegs.begin(); - LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr)); - MO.setReg(NewReg); - LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr)); - - // The OperandsMapper creates plain scalar, we may have to fix that. - // Check if the types match and if not, fix that. - LLT OrigTy = MRI.getType(OrigReg); - LLT NewTy = MRI.getType(NewReg); - if (OrigTy != NewTy) { - // The default mapping is not supposed to change the size of - // the storage. However, right now we don't necessarily bump all - // the types to storage size. For instance, we can consider - // s16 G_AND legal whereas the storage size is going to be 32. - assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() && - "Types with difference size cannot be handled by the default " - "mapping"); - LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to " - << OrigTy); - MRI.setType(NewReg, OrigTy); - } - LLVM_DEBUG(dbgs() << '\n'); - } -} - -unsigned RegisterBankInfo::getSizeInBits(Register Reg, - const MachineRegisterInfo &MRI, - const TargetRegisterInfo &TRI) const { - if (Register::isPhysicalRegister(Reg)) { - // The size is not directly available for physical registers. - // Instead, we need to access a register class that contains Reg and - // get the size of that register class. - // Because this is expensive, we'll cache the register class by calling - auto *RC = &getMinimalPhysRegClass(Reg, TRI); - assert(RC && "Expecting Register class"); - return TRI.getRegSizeInBits(*RC); - } - return TRI.getRegSizeInBits(Reg, MRI); -} - -//------------------------------------------------------------------------------ -// Helper classes implementation. -//------------------------------------------------------------------------------ -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegisterBankInfo::PartialMapping::dump() const { - print(dbgs()); - dbgs() << '\n'; -} -#endif - -bool RegisterBankInfo::PartialMapping::verify() const { - assert(RegBank && "Register bank not set"); - assert(Length && "Empty mapping"); - assert((StartIdx <= getHighBitIdx()) && "Overflow, switch to APInt?"); - // Check if the minimum width fits into RegBank. 
- assert(RegBank->getSize() >= Length && "Register bank too small for Mask"); - return true; -} - -void RegisterBankInfo::PartialMapping::print(raw_ostream &OS) const { - OS << "[" << StartIdx << ", " << getHighBitIdx() << "], RegBank = "; - if (RegBank) - OS << *RegBank; - else - OS << "nullptr"; -} - -bool RegisterBankInfo::ValueMapping::partsAllUniform() const { - if (NumBreakDowns < 2) - return true; - - const PartialMapping *First = begin(); - for (const PartialMapping *Part = First + 1; Part != end(); ++Part) { - if (Part->Length != First->Length || Part->RegBank != First->RegBank) - return false; - } - - return true; -} - -bool RegisterBankInfo::ValueMapping::verify(unsigned MeaningfulBitWidth) const { - assert(NumBreakDowns && "Value mapped nowhere?!"); - unsigned OrigValueBitWidth = 0; - for (const RegisterBankInfo::PartialMapping &PartMap : *this) { - // Check that each register bank is big enough to hold the partial value: - // this check is done by PartialMapping::verify - assert(PartMap.verify() && "Partial mapping is invalid"); - // The original value should completely be mapped. - // Thus the maximum accessed index + 1 is the size of the original value. - OrigValueBitWidth = - std::max(OrigValueBitWidth, PartMap.getHighBitIdx() + 1); - } - assert(OrigValueBitWidth >= MeaningfulBitWidth && - "Meaningful bits not covered by the mapping"); - APInt ValueMask(OrigValueBitWidth, 0); - for (const RegisterBankInfo::PartialMapping &PartMap : *this) { - // Check that the union of the partial mappings covers the whole value, - // without overlaps. - // The high bit is exclusive in the APInt API, thus getHighBitIdx + 1. - APInt PartMapMask = APInt::getBitsSet(OrigValueBitWidth, PartMap.StartIdx, - PartMap.getHighBitIdx() + 1); - ValueMask ^= PartMapMask; - assert((ValueMask & PartMapMask) == PartMapMask && - "Some partial mappings overlap"); - } - assert(ValueMask.isAllOnesValue() && "Value is not fully mapped"); - return true; -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegisterBankInfo::ValueMapping::dump() const { - print(dbgs()); - dbgs() << '\n'; -} -#endif - -void RegisterBankInfo::ValueMapping::print(raw_ostream &OS) const { - OS << "#BreakDown: " << NumBreakDowns << " "; - bool IsFirst = true; - for (const PartialMapping &PartMap : *this) { - if (!IsFirst) - OS << ", "; - OS << '[' << PartMap << ']'; - IsFirst = false; - } -} - -bool RegisterBankInfo::InstructionMapping::verify( - const MachineInstr &MI) const { - // Check that all the register operands are properly mapped. - // Check the constructor invariant. - // For PHI, we only care about mapping the definition. - assert(NumOperands == (isCopyLike(MI) ? 1 : MI.getNumOperands()) && - "NumOperands must match, see constructor"); - assert(MI.getParent() && MI.getMF() && - "MI must be connected to a MachineFunction"); - const MachineFunction &MF = *MI.getMF(); - const RegisterBankInfo *RBI = MF.getSubtarget().getRegBankInfo(); - (void)RBI; - - for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { - const MachineOperand &MO = MI.getOperand(Idx); - if (!MO.isReg()) { - assert(!getOperandMapping(Idx).isValid() && - "We should not care about non-reg mapping"); - continue; - } - Register Reg = MO.getReg(); - if (!Reg) - continue; - assert(getOperandMapping(Idx).isValid() && - "We must have a mapping for reg operands"); - const RegisterBankInfo::ValueMapping &MOMapping = getOperandMapping(Idx); - (void)MOMapping; - // Register size in bits. - // This size must match what the mapping expects. 
- assert(MOMapping.verify(RBI->getSizeInBits( - Reg, MF.getRegInfo(), *MF.getSubtarget().getRegisterInfo())) && - "Value mapping is invalid"); - } - return true; -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegisterBankInfo::InstructionMapping::dump() const { - print(dbgs()); - dbgs() << '\n'; -} -#endif - -void RegisterBankInfo::InstructionMapping::print(raw_ostream &OS) const { - OS << "ID: " << getID() << " Cost: " << getCost() << " Mapping: "; - - for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { - const ValueMapping &ValMapping = getOperandMapping(OpIdx); - if (OpIdx) - OS << ", "; - OS << "{ Idx: " << OpIdx << " Map: " << ValMapping << '}'; - } -} - -const int RegisterBankInfo::OperandsMapper::DontKnowIdx = -1; - -RegisterBankInfo::OperandsMapper::OperandsMapper( - MachineInstr &MI, const InstructionMapping &InstrMapping, - MachineRegisterInfo &MRI) - : MRI(MRI), MI(MI), InstrMapping(InstrMapping) { - unsigned NumOpds = InstrMapping.getNumOperands(); - OpToNewVRegIdx.resize(NumOpds, OperandsMapper::DontKnowIdx); - assert(InstrMapping.verify(MI) && "Invalid mapping for MI"); -} - -iterator_range<SmallVectorImpl<Register>::iterator> -RegisterBankInfo::OperandsMapper::getVRegsMem(unsigned OpIdx) { - assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); - unsigned NumPartialVal = - getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns; - int StartIdx = OpToNewVRegIdx[OpIdx]; - - if (StartIdx == OperandsMapper::DontKnowIdx) { - // This is the first time we try to access OpIdx. - // Create the cells that will hold all the partial values at the - // end of the list of NewVReg. - StartIdx = NewVRegs.size(); - OpToNewVRegIdx[OpIdx] = StartIdx; - for (unsigned i = 0; i < NumPartialVal; ++i) - NewVRegs.push_back(0); - } - SmallVectorImpl<Register>::iterator End = - getNewVRegsEnd(StartIdx, NumPartialVal); - - return make_range(&NewVRegs[StartIdx], End); -} - -SmallVectorImpl<Register>::const_iterator -RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, - unsigned NumVal) const { - return const_cast<OperandsMapper *>(this)->getNewVRegsEnd(StartIdx, NumVal); -} -SmallVectorImpl<Register>::iterator -RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, - unsigned NumVal) { - assert((NewVRegs.size() == StartIdx + NumVal || - NewVRegs.size() > StartIdx + NumVal) && - "NewVRegs too small to contain all the partial mapping"); - return NewVRegs.size() <= StartIdx + NumVal ? NewVRegs.end() - : &NewVRegs[StartIdx + NumVal]; -} - -void RegisterBankInfo::OperandsMapper::createVRegs(unsigned OpIdx) { - assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); - iterator_range<SmallVectorImpl<Register>::iterator> NewVRegsForOpIdx = - getVRegsMem(OpIdx); - const ValueMapping &ValMapping = getInstrMapping().getOperandMapping(OpIdx); - const PartialMapping *PartMap = ValMapping.begin(); - for (Register &NewVReg : NewVRegsForOpIdx) { - assert(PartMap != ValMapping.end() && "Out-of-bound access"); - assert(NewVReg == 0 && "Register has already been created"); - // The new registers are always bound to scalar with the right size. - // The actual type has to be set when the target does the mapping - // of the instruction. - // The rationale is that this generic code cannot guess how the - // target plans to split the input type. 
- NewVReg = MRI.createGenericVirtualRegister(LLT::scalar(PartMap->Length)); - MRI.setRegBank(NewVReg, *PartMap->RegBank); - ++PartMap; - } -} - -void RegisterBankInfo::OperandsMapper::setVRegs(unsigned OpIdx, - unsigned PartialMapIdx, - Register NewVReg) { - assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); - assert(getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns > - PartialMapIdx && - "Out-of-bound access for partial mapping"); - // Make sure the memory is initialized for that operand. - (void)getVRegsMem(OpIdx); - assert(NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] == 0 && - "This value is already set"); - NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] = NewVReg; -} - -iterator_range<SmallVectorImpl<Register>::const_iterator> -RegisterBankInfo::OperandsMapper::getVRegs(unsigned OpIdx, - bool ForDebug) const { - (void)ForDebug; - assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); - int StartIdx = OpToNewVRegIdx[OpIdx]; - - if (StartIdx == OperandsMapper::DontKnowIdx) - return make_range(NewVRegs.end(), NewVRegs.end()); - - unsigned PartMapSize = - getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns; - SmallVectorImpl<Register>::const_iterator End = - getNewVRegsEnd(StartIdx, PartMapSize); - iterator_range<SmallVectorImpl<Register>::const_iterator> Res = - make_range(&NewVRegs[StartIdx], End); -#ifndef NDEBUG - for (Register VReg : Res) - assert((VReg || ForDebug) && "Some registers are uninitialized"); -#endif - return Res; -} - -#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void RegisterBankInfo::OperandsMapper::dump() const { - print(dbgs(), true); - dbgs() << '\n'; -} -#endif - -void RegisterBankInfo::OperandsMapper::print(raw_ostream &OS, - bool ForDebug) const { - unsigned NumOpds = getInstrMapping().getNumOperands(); - if (ForDebug) { - OS << "Mapping for " << getMI() << "\nwith " << getInstrMapping() << '\n'; - // Print out the internal state of the index table. - OS << "Populated indices (CellNumber, IndexInNewVRegs): "; - bool IsFirst = true; - for (unsigned Idx = 0; Idx != NumOpds; ++Idx) { - if (OpToNewVRegIdx[Idx] != DontKnowIdx) { - if (!IsFirst) - OS << ", "; - OS << '(' << Idx << ", " << OpToNewVRegIdx[Idx] << ')'; - IsFirst = false; - } - } - OS << '\n'; - } else - OS << "Mapping ID: " << getInstrMapping().getID() << ' '; - - OS << "Operand Mapping: "; - // If we have a function, we can pretty print the name of the registers. - // Otherwise we will print the raw numbers. - const TargetRegisterInfo *TRI = - getMI().getParent() && getMI().getMF() - ? getMI().getMF()->getSubtarget().getRegisterInfo() - : nullptr; - bool IsFirst = true; - for (unsigned Idx = 0; Idx != NumOpds; ++Idx) { - if (OpToNewVRegIdx[Idx] == DontKnowIdx) - continue; - if (!IsFirst) - OS << ", "; - IsFirst = false; - OS << '(' << printReg(getMI().getOperand(Idx).getReg(), TRI) << ", ["; - bool IsFirstNewVReg = true; - for (Register VReg : getVRegs(Idx)) { - if (!IsFirstNewVReg) - OS << ", "; - IsFirstNewVReg = false; - OS << printReg(VReg, TRI); - } - OS << "])"; - } -} +#ifndef NDEBUG + for (const InstructionMapping *Mapping : PossibleMappings) + assert(Mapping->verify(MI) && "Mapping is invalid"); +#endif + return PossibleMappings; +} + +RegisterBankInfo::InstructionMappings +RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const { + // No alternative for MI. 
+ return InstructionMappings(); +} + +void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) { + MachineInstr &MI = OpdMapper.getMI(); + MachineRegisterInfo &MRI = OpdMapper.getMRI(); + LLVM_DEBUG(dbgs() << "Applying default-like mapping\n"); + for (unsigned OpIdx = 0, + EndIdx = OpdMapper.getInstrMapping().getNumOperands(); + OpIdx != EndIdx; ++OpIdx) { + LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx); + MachineOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) { + LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n"); + continue; + } + if (!MO.getReg()) { + LLVM_DEBUG(dbgs() << " is $noreg, nothing to be done\n"); + continue; + } + assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns != + 0 && + "Invalid mapping"); + assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns == + 1 && + "This mapping is too complex for this function"); + iterator_range<SmallVectorImpl<Register>::const_iterator> NewRegs = + OpdMapper.getVRegs(OpIdx); + if (NewRegs.empty()) { + LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n"); + continue; + } + Register OrigReg = MO.getReg(); + Register NewReg = *NewRegs.begin(); + LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr)); + MO.setReg(NewReg); + LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr)); + + // The OperandsMapper creates plain scalar, we may have to fix that. + // Check if the types match and if not, fix that. + LLT OrigTy = MRI.getType(OrigReg); + LLT NewTy = MRI.getType(NewReg); + if (OrigTy != NewTy) { + // The default mapping is not supposed to change the size of + // the storage. However, right now we don't necessarily bump all + // the types to storage size. For instance, we can consider + // s16 G_AND legal whereas the storage size is going to be 32. + assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() && + "Types with difference size cannot be handled by the default " + "mapping"); + LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to " + << OrigTy); + MRI.setType(NewReg, OrigTy); + } + LLVM_DEBUG(dbgs() << '\n'); + } +} + +unsigned RegisterBankInfo::getSizeInBits(Register Reg, + const MachineRegisterInfo &MRI, + const TargetRegisterInfo &TRI) const { + if (Register::isPhysicalRegister(Reg)) { + // The size is not directly available for physical registers. + // Instead, we need to access a register class that contains Reg and + // get the size of that register class. + // Because this is expensive, we'll cache the register class by calling + auto *RC = &getMinimalPhysRegClass(Reg, TRI); + assert(RC && "Expecting Register class"); + return TRI.getRegSizeInBits(*RC); + } + return TRI.getRegSizeInBits(Reg, MRI); +} + +//------------------------------------------------------------------------------ +// Helper classes implementation. +//------------------------------------------------------------------------------ +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::PartialMapping::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +bool RegisterBankInfo::PartialMapping::verify() const { + assert(RegBank && "Register bank not set"); + assert(Length && "Empty mapping"); + assert((StartIdx <= getHighBitIdx()) && "Overflow, switch to APInt?"); + // Check if the minimum width fits into RegBank. 
+ assert(RegBank->getSize() >= Length && "Register bank too small for Mask"); + return true; +} + +void RegisterBankInfo::PartialMapping::print(raw_ostream &OS) const { + OS << "[" << StartIdx << ", " << getHighBitIdx() << "], RegBank = "; + if (RegBank) + OS << *RegBank; + else + OS << "nullptr"; +} + +bool RegisterBankInfo::ValueMapping::partsAllUniform() const { + if (NumBreakDowns < 2) + return true; + + const PartialMapping *First = begin(); + for (const PartialMapping *Part = First + 1; Part != end(); ++Part) { + if (Part->Length != First->Length || Part->RegBank != First->RegBank) + return false; + } + + return true; +} + +bool RegisterBankInfo::ValueMapping::verify(unsigned MeaningfulBitWidth) const { + assert(NumBreakDowns && "Value mapped nowhere?!"); + unsigned OrigValueBitWidth = 0; + for (const RegisterBankInfo::PartialMapping &PartMap : *this) { + // Check that each register bank is big enough to hold the partial value: + // this check is done by PartialMapping::verify + assert(PartMap.verify() && "Partial mapping is invalid"); + // The original value should completely be mapped. + // Thus the maximum accessed index + 1 is the size of the original value. + OrigValueBitWidth = + std::max(OrigValueBitWidth, PartMap.getHighBitIdx() + 1); + } + assert(OrigValueBitWidth >= MeaningfulBitWidth && + "Meaningful bits not covered by the mapping"); + APInt ValueMask(OrigValueBitWidth, 0); + for (const RegisterBankInfo::PartialMapping &PartMap : *this) { + // Check that the union of the partial mappings covers the whole value, + // without overlaps. + // The high bit is exclusive in the APInt API, thus getHighBitIdx + 1. + APInt PartMapMask = APInt::getBitsSet(OrigValueBitWidth, PartMap.StartIdx, + PartMap.getHighBitIdx() + 1); + ValueMask ^= PartMapMask; + assert((ValueMask & PartMapMask) == PartMapMask && + "Some partial mappings overlap"); + } + assert(ValueMask.isAllOnesValue() && "Value is not fully mapped"); + return true; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::ValueMapping::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +void RegisterBankInfo::ValueMapping::print(raw_ostream &OS) const { + OS << "#BreakDown: " << NumBreakDowns << " "; + bool IsFirst = true; + for (const PartialMapping &PartMap : *this) { + if (!IsFirst) + OS << ", "; + OS << '[' << PartMap << ']'; + IsFirst = false; + } +} + +bool RegisterBankInfo::InstructionMapping::verify( + const MachineInstr &MI) const { + // Check that all the register operands are properly mapped. + // Check the constructor invariant. + // For PHI, we only care about mapping the definition. + assert(NumOperands == (isCopyLike(MI) ? 1 : MI.getNumOperands()) && + "NumOperands must match, see constructor"); + assert(MI.getParent() && MI.getMF() && + "MI must be connected to a MachineFunction"); + const MachineFunction &MF = *MI.getMF(); + const RegisterBankInfo *RBI = MF.getSubtarget().getRegBankInfo(); + (void)RBI; + + for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { + const MachineOperand &MO = MI.getOperand(Idx); + if (!MO.isReg()) { + assert(!getOperandMapping(Idx).isValid() && + "We should not care about non-reg mapping"); + continue; + } + Register Reg = MO.getReg(); + if (!Reg) + continue; + assert(getOperandMapping(Idx).isValid() && + "We must have a mapping for reg operands"); + const RegisterBankInfo::ValueMapping &MOMapping = getOperandMapping(Idx); + (void)MOMapping; + // Register size in bits. + // This size must match what the mapping expects. 
+ assert(MOMapping.verify(RBI->getSizeInBits( + Reg, MF.getRegInfo(), *MF.getSubtarget().getRegisterInfo())) && + "Value mapping is invalid"); + } + return true; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void RegisterBankInfo::InstructionMapping::dump() const { + print(dbgs()); + dbgs() << '\n'; +} +#endif + +void RegisterBankInfo::InstructionMapping::print(raw_ostream &OS) const { + OS << "ID: " << getID() << " Cost: " << getCost() << " Mapping: "; + + for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { + const ValueMapping &ValMapping = getOperandMapping(OpIdx); + if (OpIdx) + OS << ", "; + OS << "{ Idx: " << OpIdx << " Map: " << ValMapping << '}'; + } +} + +const int RegisterBankInfo::OperandsMapper::DontKnowIdx = -1; + +RegisterBankInfo::OperandsMapper::OperandsMapper( + MachineInstr &MI, const InstructionMapping &InstrMapping, + MachineRegisterInfo &MRI) + : MRI(MRI), MI(MI), InstrMapping(InstrMapping) { + unsigned NumOpds = InstrMapping.getNumOperands(); + OpToNewVRegIdx.resize(NumOpds, OperandsMapper::DontKnowIdx); + assert(InstrMapping.verify(MI) && "Invalid mapping for MI"); +} + +iterator_range<SmallVectorImpl<Register>::iterator> +RegisterBankInfo::OperandsMapper::getVRegsMem(unsigned OpIdx) { + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + unsigned NumPartialVal = + getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns; + int StartIdx = OpToNewVRegIdx[OpIdx]; + + if (StartIdx == OperandsMapper::DontKnowIdx) { + // This is the first time we try to access OpIdx. + // Create the cells that will hold all the partial values at the + // end of the list of NewVReg. + StartIdx = NewVRegs.size(); + OpToNewVRegIdx[OpIdx] = StartIdx; + for (unsigned i = 0; i < NumPartialVal; ++i) + NewVRegs.push_back(0); + } + SmallVectorImpl<Register>::iterator End = + getNewVRegsEnd(StartIdx, NumPartialVal); + + return make_range(&NewVRegs[StartIdx], End); +} + +SmallVectorImpl<Register>::const_iterator +RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, + unsigned NumVal) const { + return const_cast<OperandsMapper *>(this)->getNewVRegsEnd(StartIdx, NumVal); +} +SmallVectorImpl<Register>::iterator +RegisterBankInfo::OperandsMapper::getNewVRegsEnd(unsigned StartIdx, + unsigned NumVal) { + assert((NewVRegs.size() == StartIdx + NumVal || + NewVRegs.size() > StartIdx + NumVal) && + "NewVRegs too small to contain all the partial mapping"); + return NewVRegs.size() <= StartIdx + NumVal ? NewVRegs.end() + : &NewVRegs[StartIdx + NumVal]; +} + +void RegisterBankInfo::OperandsMapper::createVRegs(unsigned OpIdx) { + assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access"); + iterator_range<SmallVectorImpl<Register>::iterator> NewVRegsForOpIdx = + getVRegsMem(OpIdx); + const ValueMapping &ValMapping = getInstrMapping().getOperandMapping(OpIdx); + const PartialMapping *PartMap = ValMapping.begin(); + for (Register &NewVReg : NewVRegsForOpIdx) { + assert(PartMap != ValMapping.end() && "Out-of-bound access"); + assert(NewVReg == 0 && "Register has already been created"); + // The new registers are always bound to scalar with the right size. + // The actual type has to be set when the target does the mapping + // of the instruction. + // The rationale is that this generic code cannot guess how the + // target plans to split the input type. 
+void RegisterBankInfo::OperandsMapper::setVRegs(unsigned OpIdx,
+ unsigned PartialMapIdx,
+ Register NewVReg) {
+ assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access");
+ assert(getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns >
+ PartialMapIdx &&
+ "Out-of-bound access for partial mapping");
+ // Make sure the memory is initialized for that operand.
+ (void)getVRegsMem(OpIdx);
+ assert(NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] == 0 &&
+ "This value is already set");
+ NewVRegs[OpToNewVRegIdx[OpIdx] + PartialMapIdx] = NewVReg;
+}
+
+iterator_range<SmallVectorImpl<Register>::const_iterator>
+RegisterBankInfo::OperandsMapper::getVRegs(unsigned OpIdx,
+ bool ForDebug) const {
+ (void)ForDebug;
+ assert(OpIdx < getInstrMapping().getNumOperands() && "Out-of-bound access");
+ int StartIdx = OpToNewVRegIdx[OpIdx];
+
+ if (StartIdx == OperandsMapper::DontKnowIdx)
+ return make_range(NewVRegs.end(), NewVRegs.end());
+
+ unsigned PartMapSize =
+ getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns;
+ SmallVectorImpl<Register>::const_iterator End =
+ getNewVRegsEnd(StartIdx, PartMapSize);
+ iterator_range<SmallVectorImpl<Register>::const_iterator> Res =
+ make_range(&NewVRegs[StartIdx], End);
+#ifndef NDEBUG
+ for (Register VReg : Res)
+ assert((VReg || ForDebug) && "Some registers are uninitialized");
+#endif
+ return Res;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void RegisterBankInfo::OperandsMapper::dump() const {
+ print(dbgs(), true);
+ dbgs() << '\n';
+}
+#endif
+
+void RegisterBankInfo::OperandsMapper::print(raw_ostream &OS,
+ bool ForDebug) const {
+ unsigned NumOpds = getInstrMapping().getNumOperands();
+ if (ForDebug) {
+ OS << "Mapping for " << getMI() << "\nwith " << getInstrMapping() << '\n';
+ // Print out the internal state of the index table.
+ OS << "Populated indices (CellNumber, IndexInNewVRegs): ";
+ bool IsFirst = true;
+ for (unsigned Idx = 0; Idx != NumOpds; ++Idx) {
+ if (OpToNewVRegIdx[Idx] != DontKnowIdx) {
+ if (!IsFirst)
+ OS << ", ";
+ OS << '(' << Idx << ", " << OpToNewVRegIdx[Idx] << ')';
+ IsFirst = false;
+ }
+ }
+ OS << '\n';
+ } else
+ OS << "Mapping ID: " << getInstrMapping().getID() << ' ';
+
+ OS << "Operand Mapping: ";
+ // If we have a function, we can pretty print the name of the registers.
+ // Otherwise we will print the raw numbers.
+ const TargetRegisterInfo *TRI =
+ getMI().getParent() && getMI().getMF()
+ ? getMI().getMF()->getSubtarget().getRegisterInfo()
+ : nullptr;
+ bool IsFirst = true;
+ for (unsigned Idx = 0; Idx != NumOpds; ++Idx) {
+ if (OpToNewVRegIdx[Idx] == DontKnowIdx)
+ continue;
+ if (!IsFirst)
+ OS << ", ";
+ IsFirst = false;
+ OS << '(' << printReg(getMI().getOperand(Idx).getReg(), TRI) << ", [";
+ bool IsFirstNewVReg = true;
+ for (Register VReg : getVRegs(Idx)) {
+ if (!IsFirstNewVReg)
+ OS << ", ";
+ IsFirstNewVReg = false;
+ OS << printReg(VReg, TRI);
+ }
+ OS << "])";
+ }
+}
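// Editor's sketch (assumed formatting, not generated output): the operand
// print loop above emits "(reg, [new vregs])" groups separated by ", ".
// A standalone reproduction of that shape, with hypothetical vreg numbers:
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Operand register -> repaired vreg, for a three-operand instruction.
  std::vector<std::pair<int, int>> OpToVReg = {{0, 3}, {1, 4}, {2, 5}};
  bool IsFirst = true;
  for (auto &P : OpToVReg) {
    if (!IsFirst)
      std::printf(", ");
    IsFirst = false;
    std::printf("(%%%d, [%%%d])", P.first, P.second);
  }
  std::printf("\n"); // e.g. (%0, [%3]), (%1, [%4]), (%2, [%5])
  return 0;
}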
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
index ac3164cb1b..cd24832244 100644
--- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1,74 +1,74 @@
-//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-/// \file This file implements the utility functions used by the GlobalISel
-/// pipeline.
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/ADT/APFloat.h"
+//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file This file implements the utility functions used by the GlobalISel
+/// pipeline.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
-#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/StackProtector.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
-#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/CodeGen/TargetRegisterInfo.h"
-#include "llvm/IR/Constants.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
-
-#define DEBUG_TYPE "globalisel-utils"
-
-using namespace llvm;
+
+#define DEBUG_TYPE "globalisel-utils"
+
+using namespace llvm;
using namespace MIPatternMatch;
-
-Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
- const TargetInstrInfo &TII,
- const RegisterBankInfo &RBI, Register Reg,
- const TargetRegisterClass &RegClass) {
- if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
- return MRI.createVirtualRegister(&RegClass);
-
- return Reg;
-}
-
-Register llvm::constrainOperandRegClass(
- const MachineFunction &MF, const TargetRegisterInfo &TRI,
- MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
- const RegisterBankInfo &RBI, MachineInstr &InsertPt,
+
+Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
+ const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI, Register Reg,
+ const TargetRegisterClass &RegClass) {
+ if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
+ return MRI.createVirtualRegister(&RegClass);
+
+ return Reg;
+}
+
+Register llvm::constrainOperandRegClass(
+ const MachineFunction &MF, const TargetRegisterInfo &TRI,
+ MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI, MachineInstr &InsertPt,
const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
- Register Reg = RegMO.getReg();
- // Assume physical registers are properly constrained.
- assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
-
- Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
- // If we created a new virtual register because the class is not compatible
- // then create a copy between the new and the old register.
- if (ConstrainedReg != Reg) {
- MachineBasicBlock::iterator InsertIt(&InsertPt);
- MachineBasicBlock &MBB = *InsertPt.getParent();
- if (RegMO.isUse()) {
- BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
- TII.get(TargetOpcode::COPY), ConstrainedReg)
- .addReg(Reg);
- } else {
- assert(RegMO.isDef() && "Must be a definition");
- BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
- TII.get(TargetOpcode::COPY), Reg)
- .addReg(ConstrainedReg);
- }
+ Register Reg = RegMO.getReg();
+ // Assume physical registers are properly constrained.
+ assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
+
+ Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
+ // If we created a new virtual register because the class is not compatible
+ // then create a copy between the new and the old register.
+ if (ConstrainedReg != Reg) {
+ MachineBasicBlock::iterator InsertIt(&InsertPt);
+ MachineBasicBlock &MBB = *InsertPt.getParent();
+ if (RegMO.isUse()) {
+ BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
+ TII.get(TargetOpcode::COPY), ConstrainedReg)
+ .addReg(Reg);
+ } else {
+ assert(RegMO.isDef() && "Must be a definition");
+ BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
+ TII.get(TargetOpcode::COPY), Reg)
+ .addReg(ConstrainedReg);
+ }
if (GISelChangeObserver *Observer = MF.getObserver()) {
Observer->changingInstr(*RegMO.getParent());
}
@@ -76,122 +76,122 @@ Register llvm::constrainOperandRegClass(
if (GISelChangeObserver *Observer = MF.getObserver()) {
Observer->changedInstr(*RegMO.getParent());
}
- } else {
- if (GISelChangeObserver *Observer = MF.getObserver()) {
- if (!RegMO.isDef()) {
- MachineInstr *RegDef = MRI.getVRegDef(Reg);
- Observer->changedInstr(*RegDef);
- }
- Observer->changingAllUsesOfReg(MRI, Reg);
- Observer->finishedChangingAllUsesOfReg();
- }
- }
- return ConstrainedReg;
-}
-
-Register llvm::constrainOperandRegClass(
- const MachineFunction &MF, const TargetRegisterInfo &TRI,
- MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
- const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
+ } else {
+ if (GISelChangeObserver *Observer = MF.getObserver()) {
+ if (!RegMO.isDef()) {
+ MachineInstr *RegDef = MRI.getVRegDef(Reg);
+ Observer->changedInstr(*RegDef);
+ }
+ Observer->changingAllUsesOfReg(MRI, Reg);
+ Observer->finishedChangingAllUsesOfReg();
+ }
+ }
+ return ConstrainedReg;
+}
+
+Register llvm::constrainOperandRegClass(
+ const MachineFunction &MF, const TargetRegisterInfo &TRI,
+ MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
+ const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
MachineOperand &RegMO, unsigned OpIdx) {
- Register Reg = RegMO.getReg();
- // Assume physical registers are properly constrained.
- assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
-
- const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
- // Some of the target independent instructions, like COPY, may not impose any
- // register class constraints on some of their operands: If it's a use, we can
- // skip constraining as the instruction defining the register would constrain
- // it.
-
- // We can't constrain unallocatable register classes, because we can't create
- // virtual registers for these classes, so we need to let targets handled this
- // case.
- if (RegClass && !RegClass->isAllocatable())
- RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
-
- if (!RegClass) {
- assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
- "Register class constraint is required unless either the "
- "instruction is target independent or the operand is a use");
- // FIXME: Just bailing out like this here could be not enough, unless we
- // expect the users of this function to do the right thing for PHIs and
- // COPY:
- // v1 = COPY v0
- // v2 = COPY v1
- // v1 here may end up not being constrained at all. Please notice that to
- // reproduce the issue we likely need a destination pattern of a selection
- // rule producing such extra copies, not just an input GMIR with them as
- // every existing target using selectImpl handles copies before calling it
- // and they never reach this function.
- return Reg;
- }
- return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
- RegMO);
-}
-
-bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
- const TargetInstrInfo &TII,
- const TargetRegisterInfo &TRI,
- const RegisterBankInfo &RBI) {
- assert(!isPreISelGenericOpcode(I.getOpcode()) &&
- "A selected instruction is expected");
- MachineBasicBlock &MBB = *I.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
- MachineOperand &MO = I.getOperand(OpI);
-
- // There's nothing to be done on non-register operands.
- if (!MO.isReg())
- continue;
-
- LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
- assert(MO.isReg() && "Unsupported non-reg operand");
-
- Register Reg = MO.getReg();
- // Physical registers don't need to be constrained.
- if (Register::isPhysicalRegister(Reg))
- continue;
-
- // Register operands with a value of 0 (e.g. predicate operands) don't need
- // to be constrained.
- if (Reg == 0)
- continue;
-
- // If the operand is a vreg, we should constrain its regclass, and only
- // insert COPYs if that's impossible.
- // constrainOperandRegClass does that for us.
+ Register Reg = RegMO.getReg();
+ // Assume physical registers are properly constrained.
+ assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
+
+ const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
+ // Some of the target independent instructions, like COPY, may not impose any
+ // register class constraints on some of their operands: If it's a use, we can
+ // skip constraining as the instruction defining the register would constrain
+ // it.
+
+ // We can't constrain unallocatable register classes, because we can't create
+ // virtual registers for these classes, so we need to let targets handled this
+ // case.
+ if (RegClass && !RegClass->isAllocatable())
+ RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
+
+ if (!RegClass) {
+ assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
+ "Register class constraint is required unless either the "
+ "instruction is target independent or the operand is a use");
+ // FIXME: Just bailing out like this here could be not enough, unless we
+ // expect the users of this function to do the right thing for PHIs and
+ // COPY:
+ // v1 = COPY v0
+ // v2 = COPY v1
+ // v1 here may end up not being constrained at all. Please notice that to
+ // reproduce the issue we likely need a destination pattern of a selection
+ // rule producing such extra copies, not just an input GMIR with them as
+ // every existing target using selectImpl handles copies before calling it
+ // and they never reach this function.
+ return Reg;
+ }
+ return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
+ RegMO);
+}
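// Editor's worked example (hypothetical MIR, not from the patch): what the
// COPY insertion in constrainOperandRegClass above does when the operand's
// class is incompatible. Constraining a *use* places the COPY before the
// instruction:
//   %1:gpr32 = COPY %0
//   USE_INSTR %1, ...
// Constraining a *def* places it after, so existing users keep reading %0:
//   %1:gpr32 = DEF_INSTR ...
//   %0 = COPY %1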
+
+bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
+ const TargetInstrInfo &TII,
+ const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI) {
+ assert(!isPreISelGenericOpcode(I.getOpcode()) &&
+ "A selected instruction is expected");
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
+ MachineOperand &MO = I.getOperand(OpI);
+
+ // There's nothing to be done on non-register operands.
+ if (!MO.isReg())
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
+ assert(MO.isReg() && "Unsupported non-reg operand");
+
+ Register Reg = MO.getReg();
+ // Physical registers don't need to be constrained.
+ if (Register::isPhysicalRegister(Reg))
+ continue;
+
+ // Register operands with a value of 0 (e.g. predicate operands) don't need
+ // to be constrained.
+ if (Reg == 0)
+ continue;
+
+ // If the operand is a vreg, we should constrain its regclass, and only
+ // insert COPYs if that's impossible.
+ // constrainOperandRegClass does that for us.
constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
-
- // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
- // done.
- if (MO.isUse()) {
- int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
- if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
- I.tieOperands(DefIdx, OpI);
- }
- }
- return true;
-}
-
-bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
- MachineRegisterInfo &MRI) {
- // Give up if either DstReg or SrcReg is a physical register.
- if (DstReg.isPhysical() || SrcReg.isPhysical())
- return false;
- // Give up if the types don't match.
- if (MRI.getType(DstReg) != MRI.getType(SrcReg))
- return false;
- // Replace if either DstReg has no constraints or the register
- // constraints match.
- return !MRI.getRegClassOrRegBank(DstReg) ||
- MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
-}
-
-bool llvm::isTriviallyDead(const MachineInstr &MI,
- const MachineRegisterInfo &MRI) {
+
+ // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
+ // done.
+ if (MO.isUse()) {
+ int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
+ if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
+ I.tieOperands(DefIdx, OpI);
+ }
+ }
+ return true;
+}
+
+bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
+ MachineRegisterInfo &MRI) {
+ // Give up if either DstReg or SrcReg is a physical register.
+ if (DstReg.isPhysical() || SrcReg.isPhysical())
+ return false;
+ // Give up if the types don't match.
+ if (MRI.getType(DstReg) != MRI.getType(SrcReg))
+ return false;
+ // Replace if either DstReg has no constraints or the register
+ // constraints match.
+ return !MRI.getRegClassOrRegBank(DstReg) ||
+ MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
+}
+
+bool llvm::isTriviallyDead(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) {
// FIXME: This logical is mostly duplicated with
// DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
// MachineInstr::isLabel?
@@ -200,79 +200,79 @@ bool llvm::isTriviallyDead(const MachineInstr &MI,
if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
return false;
- // If we can move an instruction, we can remove it. Otherwise, it has
- // a side-effect of some sort.
- bool SawStore = false;
- if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
- return false;
-
- // Instructions without side-effects are dead iff they only define dead vregs.
- for (auto &MO : MI.operands()) {
- if (!MO.isReg() || !MO.isDef())
- continue;
-
- Register Reg = MO.getReg();
- if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
- return false;
- }
- return true;
-}
-
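// Editor's note (assumed example, not from the patch): by the rules in
// isTriviallyDead above, a side-effect-free instruction such as
//   %2:_(s32) = G_ADD %0, %1
// is trivially dead once %2 has no non-debug uses, whereas LOCAL_ESCAPE is
// explicitly kept even though it would otherwise look safe to remove.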
-static void reportGISelDiagnostic(DiagnosticSeverity Severity,
- MachineFunction &MF,
- const TargetPassConfig &TPC,
- MachineOptimizationRemarkEmitter &MORE,
- MachineOptimizationRemarkMissed &R) {
- bool IsFatal = Severity == DS_Error &&
- TPC.isGlobalISelAbortEnabled();
- // Print the function name explicitly if we don't have a debug location (which
- // makes the diagnostic less useful) or if we're going to emit a raw error.
- if (!R.getLocation().isValid() || IsFatal)
- R << (" (in function: " + MF.getName() + ")").str();
-
- if (IsFatal)
- report_fatal_error(R.getMsg());
- else
- MORE.emit(R);
-}
-
-void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
- MachineOptimizationRemarkEmitter &MORE,
- MachineOptimizationRemarkMissed &R) {
- reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
-}
-
-void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
- MachineOptimizationRemarkEmitter &MORE,
- MachineOptimizationRemarkMissed &R) {
- MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
- reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
-}
-
-void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
- MachineOptimizationRemarkEmitter &MORE,
- const char *PassName, StringRef Msg,
- const MachineInstr &MI) {
- MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
- MI.getDebugLoc(), MI.getParent());
- R << Msg;
- // Printing MI is expensive; only do it if expensive remarks are enabled.
- if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
- R << ": " << ore::MNV("Inst", MI);
- reportGISelFailure(MF, TPC, MORE, R);
-}
-
+ // If we can move an instruction, we can remove it. Otherwise, it has
+ // a side-effect of some sort.
+ bool SawStore = false;
+ if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
+ return false;
+
+ // Instructions without side-effects are dead iff they only define dead vregs.
+ for (auto &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+
+ Register Reg = MO.getReg();
+ if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
+ return false;
+ }
+ return true;
+}
+
+static void reportGISelDiagnostic(DiagnosticSeverity Severity,
+ MachineFunction &MF,
+ const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ bool IsFatal = Severity == DS_Error &&
+ TPC.isGlobalISelAbortEnabled();
+ // Print the function name explicitly if we don't have a debug location (which
+ // makes the diagnostic less useful) or if we're going to emit a raw error.
+ if (!R.getLocation().isValid() || IsFatal)
+ R << (" (in function: " + MF.getName() + ")").str();
+
+ if (IsFatal)
+ report_fatal_error(R.getMsg());
+ else
+ MORE.emit(R);
+}
+
+void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
+}
+
+void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ MachineOptimizationRemarkMissed &R) {
+ MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
+ reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
+}
+
+void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+ MachineOptimizationRemarkEmitter &MORE,
+ const char *PassName, StringRef Msg,
+ const MachineInstr &MI) {
+ MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
+ MI.getDebugLoc(), MI.getParent());
+ R << Msg;
+ // Printing MI is expensive; only do it if expensive remarks are enabled.
+ if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
+ R << ": " << ore::MNV("Inst", MI);
+ reportGISelFailure(MF, TPC, MORE, R);
+}
+
Optional<APInt> llvm::getConstantVRegVal(Register VReg,
const MachineRegisterInfo &MRI) {
- Optional<ValueAndVReg> ValAndVReg =
- getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
- assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
- "Value found while looking through instrs");
- if (!ValAndVReg)
- return None;
- return ValAndVReg->Value;
-}
-
+ Optional<ValueAndVReg> ValAndVReg =
+ getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
+ assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
+ "Value found while looking through instrs");
+ if (!ValAndVReg)
+ return None;
+ return ValAndVReg->Value;
+}
+
Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
const MachineRegisterInfo &MRI) {
Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
@@ -281,237 +281,237 @@ Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
return None;
}
-Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
- Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
+Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
+ Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
bool HandleFConstant, bool LookThroughAnyExt) {
- SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
- MachineInstr *MI;
- auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
- return Opcode == TargetOpcode::G_CONSTANT ||
- (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
- };
- auto GetImmediateValue = [HandleFConstant,
- &MRI](const MachineInstr &MI) -> Optional<APInt> {
- const MachineOperand &CstVal = MI.getOperand(1);
- if (!CstVal.isImm() && !CstVal.isCImm() &&
- (!HandleFConstant || !CstVal.isFPImm()))
- return None;
- if (!CstVal.isFPImm()) {
- unsigned BitWidth =
- MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
- APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
- : CstVal.getCImm()->getValue();
- assert(Val.getBitWidth() == BitWidth &&
- "Value bitwidth doesn't match definition type");
- return Val;
- }
- return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
- };
- while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
- LookThroughInstrs) {
- switch (MI->getOpcode()) {
+ SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
+ MachineInstr *MI;
+ auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
+ return Opcode == TargetOpcode::G_CONSTANT ||
+ (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
+ };
+ auto GetImmediateValue = [HandleFConstant,
+ &MRI](const MachineInstr &MI) -> Optional<APInt> {
+ const MachineOperand &CstVal = MI.getOperand(1);
+ if (!CstVal.isImm() && !CstVal.isCImm() &&
+ (!HandleFConstant || !CstVal.isFPImm()))
+ return None;
+ if (!CstVal.isFPImm()) {
+ unsigned BitWidth =
+ MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
+ APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
+ : CstVal.getCImm()->getValue();
+ assert(Val.getBitWidth() == BitWidth &&
+ "Value bitwidth doesn't match definition type");
+ return Val;
+ }
+ return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
+ };
+ while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
+ LookThroughInstrs) {
+ switch (MI->getOpcode()) {
case TargetOpcode::G_ANYEXT:
if (!LookThroughAnyExt)
return None;
LLVM_FALLTHROUGH;
- case TargetOpcode::G_TRUNC:
- case TargetOpcode::G_SEXT:
- case TargetOpcode::G_ZEXT:
- SeenOpcodes.push_back(std::make_pair(
- MI->getOpcode(),
- MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
- VReg = MI->getOperand(1).getReg();
- break;
- case TargetOpcode::COPY:
- VReg = MI->getOperand(1).getReg();
- if (Register::isPhysicalRegister(VReg))
- return None;
- break;
- case TargetOpcode::G_INTTOPTR:
- VReg = MI->getOperand(1).getReg();
- break;
- default:
- return None;
- }
- }
- if (!MI || !IsConstantOpcode(MI->getOpcode()))
- return None;
-
- Optional<APInt> MaybeVal = GetImmediateValue(*MI);
- if (!MaybeVal)
- return None;
- APInt &Val = *MaybeVal;
- while (!SeenOpcodes.empty()) {
- std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
- switch (OpcodeAndSize.first) {
- case TargetOpcode::G_TRUNC:
- Val = Val.trunc(OpcodeAndSize.second);
- break;
case TargetOpcode::G_ANYEXT:
- case TargetOpcode::G_SEXT:
- Val = Val.sext(OpcodeAndSize.second);
- break;
- case TargetOpcode::G_ZEXT:
- Val = Val.zext(OpcodeAndSize.second);
- break;
- }
- }
-
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ SeenOpcodes.push_back(std::make_pair(
+ MI->getOpcode(),
+ MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
+ VReg = MI->getOperand(1).getReg();
+ break;
+ case TargetOpcode::COPY:
+ VReg = MI->getOperand(1).getReg();
+ if (Register::isPhysicalRegister(VReg))
+ return None;
+ break;
+ case TargetOpcode::G_INTTOPTR:
+ VReg = MI->getOperand(1).getReg();
+ break;
+ default:
+ return None;
+ }
+ }
+ if (!MI || !IsConstantOpcode(MI->getOpcode()))
+ return None;
+
+ Optional<APInt> MaybeVal = GetImmediateValue(*MI);
+ if (!MaybeVal)
+ return None;
+ APInt &Val = *MaybeVal;
+ while (!SeenOpcodes.empty()) {
+ std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
+ switch (OpcodeAndSize.first) {
+ case TargetOpcode::G_TRUNC:
+ Val = Val.trunc(OpcodeAndSize.second);
+ break;
case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
+ Val = Val.sext(OpcodeAndSize.second);
+ break;
+ case TargetOpcode::G_ZEXT:
+ Val = Val.zext(OpcodeAndSize.second);
+ break;
+ }
+ }
+
return ValueAndVReg{Val, VReg};
-}
-
+}
+
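// Editor's sketch (assumed, not from the patch): replaying the recorded
// extension opcodes on an APInt the way the second loop above does. Uses only
// llvm::APInt (LLVMSupport); the seen-opcode chain is hypothetical.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  llvm::APInt Val(8, 0xff); // the G_CONSTANT i8 -1 found at the chain's end
  // Pretend we looked through a G_SEXT s8 -> s32, recorded as (G_SEXT, 32);
  // the replay happens in reverse order, exactly like the pop_back_val loop.
  Val = Val.sext(32);
  std::printf("%lld\n", (long long)Val.getSExtValue()); // prints -1
  return 0;
}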
const ConstantFP *
-llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
- MachineInstr *MI = MRI.getVRegDef(VReg);
- if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
- return nullptr;
- return MI->getOperand(1).getFPImm();
-}
-
+llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
+ MachineInstr *MI = MRI.getVRegDef(VReg);
+ if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
+ return nullptr;
+ return MI->getOperand(1).getFPImm();
+}
+
Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
- Register DefSrcReg = Reg;
- auto *DefMI = MRI.getVRegDef(Reg);
- auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
- if (!DstTy.isValid())
- return None;
- while (DefMI->getOpcode() == TargetOpcode::COPY) {
- Register SrcReg = DefMI->getOperand(1).getReg();
- auto SrcTy = MRI.getType(SrcReg);
+ Register DefSrcReg = Reg;
+ auto *DefMI = MRI.getVRegDef(Reg);
+ auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
+ if (!DstTy.isValid())
+ return None;
+ while (DefMI->getOpcode() == TargetOpcode::COPY) {
+ Register SrcReg = DefMI->getOperand(1).getReg();
+ auto SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isValid())
- break;
- DefMI = MRI.getVRegDef(SrcReg);
- DefSrcReg = SrcReg;
- }
- return DefinitionAndSourceRegister{DefMI, DefSrcReg};
-}
-
+ break;
+ DefMI = MRI.getVRegDef(SrcReg);
+ DefSrcReg = SrcReg;
+ }
+ return DefinitionAndSourceRegister{DefMI, DefSrcReg};
+}
+
MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
const MachineRegisterInfo &MRI) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
- getDefSrcRegIgnoringCopies(Reg, MRI);
- return DefSrcReg ? DefSrcReg->MI : nullptr;
-}
-
-Register llvm::getSrcRegIgnoringCopies(Register Reg,
- const MachineRegisterInfo &MRI) {
- Optional<DefinitionAndSourceRegister> DefSrcReg =
- getDefSrcRegIgnoringCopies(Reg, MRI);
- return DefSrcReg ? DefSrcReg->Reg : Register();
-}
-
+ Optional<DefinitionAndSourceRegister> DefSrcReg =
+ getDefSrcRegIgnoringCopies(Reg, MRI);
+ return DefSrcReg ? DefSrcReg->MI : nullptr;
+}
+
+Register llvm::getSrcRegIgnoringCopies(Register Reg,
+ const MachineRegisterInfo &MRI) {
+ Optional<DefinitionAndSourceRegister> DefSrcReg =
+ getDefSrcRegIgnoringCopies(Reg, MRI);
+ return DefSrcReg ? DefSrcReg->Reg : Register();
+}
+
MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
const MachineRegisterInfo &MRI) {
- MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
- return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
-}
-
-APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
- if (Size == 32)
- return APFloat(float(Val));
- if (Size == 64)
- return APFloat(Val);
- if (Size != 16)
- llvm_unreachable("Unsupported FPConstant size");
- bool Ignored;
- APFloat APF(Val);
- APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
- return APF;
-}
-
-Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
- const Register Op2,
- const MachineRegisterInfo &MRI) {
- auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
- if (!MaybeOp2Cst)
- return None;
-
- auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
- if (!MaybeOp1Cst)
- return None;
-
+ MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
+ return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
+}
+
+APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
+ if (Size == 32)
+ return APFloat(float(Val));
+ if (Size == 64)
+ return APFloat(Val);
+ if (Size != 16)
+ llvm_unreachable("Unsupported FPConstant size");
+ bool Ignored;
+ APFloat APF(Val);
+ APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
+ return APF;
+}
+
+Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+ const Register Op2,
+ const MachineRegisterInfo &MRI) {
+ auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+ if (!MaybeOp2Cst)
+ return None;
+
+ auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+ if (!MaybeOp1Cst)
+ return None;
+
const APInt &C1 = *MaybeOp1Cst;
const APInt &C2 = *MaybeOp2Cst;
- switch (Opcode) {
- default:
- break;
- case TargetOpcode::G_ADD:
- return C1 + C2;
- case TargetOpcode::G_AND:
- return C1 & C2;
- case TargetOpcode::G_ASHR:
- return C1.ashr(C2);
- case TargetOpcode::G_LSHR:
- return C1.lshr(C2);
- case TargetOpcode::G_MUL:
- return C1 * C2;
- case TargetOpcode::G_OR:
- return C1 | C2;
- case TargetOpcode::G_SHL:
- return C1 << C2;
- case TargetOpcode::G_SUB:
- return C1 - C2;
- case TargetOpcode::G_XOR:
- return C1 ^ C2;
- case TargetOpcode::G_UDIV:
- if (!C2.getBoolValue())
- break;
- return C1.udiv(C2);
- case TargetOpcode::G_SDIV:
- if (!C2.getBoolValue())
- break;
- return C1.sdiv(C2);
- case TargetOpcode::G_UREM:
- if (!C2.getBoolValue())
- break;
- return C1.urem(C2);
- case TargetOpcode::G_SREM:
- if (!C2.getBoolValue())
- break;
- return C1.srem(C2);
- }
-
- return None;
-}
-
-bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
- bool SNaN) {
- const MachineInstr *DefMI = MRI.getVRegDef(Val);
- if (!DefMI)
- return false;
-
+ switch (Opcode) {
+ default:
+ break;
+ case TargetOpcode::G_ADD:
+ return C1 + C2;
+ case TargetOpcode::G_AND:
+ return C1 & C2;
+ case TargetOpcode::G_ASHR:
+ return C1.ashr(C2);
+ case TargetOpcode::G_LSHR:
+ return C1.lshr(C2);
+ case TargetOpcode::G_MUL:
+ return C1 * C2;
+ case TargetOpcode::G_OR:
+ return C1 | C2;
+ case TargetOpcode::G_SHL:
+ return C1 << C2;
+ case TargetOpcode::G_SUB:
+ return C1 - C2;
+ case TargetOpcode::G_XOR:
+ return C1 ^ C2;
+ case TargetOpcode::G_UDIV:
+ if (!C2.getBoolValue())
+ break;
+ return C1.udiv(C2);
+ case TargetOpcode::G_SDIV:
+ if (!C2.getBoolValue())
+ break;
+ return C1.sdiv(C2);
+ case TargetOpcode::G_UREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.urem(C2);
+ case TargetOpcode::G_SREM:
+ if (!C2.getBoolValue())
+ break;
+ return C1.srem(C2);
+ }
+
+ return None;
+}
+
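// Editor's sketch (assumed, not from the patch): the folding table above on
// concrete APInt values; division by zero falls through to the None path.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  llvm::APInt C1(32, 7), C2(32, 3), Zero(32, 0);
  // G_ADD and G_UDIV fold directly when both operands are constants.
  std::printf("%llu\n", (unsigned long long)(C1 + C2).getZExtValue());   // 10
  std::printf("%llu\n", (unsigned long long)C1.udiv(C2).getZExtValue()); // 2
  // C1.udiv(Zero) is never evaluated: the !C2.getBoolValue() guard breaks.
  std::printf("%d\n", Zero.getBoolValue()); // 0, i.e. the guard fires
  return 0;
}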
+bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
+ bool SNaN) {
+ const MachineInstr *DefMI = MRI.getVRegDef(Val);
+ if (!DefMI)
+ return false;
+
const TargetMachine& TM = DefMI->getMF()->getTarget();
if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
- return true;
-
- if (SNaN) {
- // FP operations quiet. For now, just handle the ones inserted during
- // legalization.
- switch (DefMI->getOpcode()) {
- case TargetOpcode::G_FPEXT:
- case TargetOpcode::G_FPTRUNC:
- case TargetOpcode::G_FCANONICALIZE:
- return true;
- default:
- return false;
- }
- }
-
- return false;
-}
-
-Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
- const MachinePointerInfo &MPO) {
- auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
- if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
- MachineFrameInfo &MFI = MF.getFrameInfo();
- return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
- MPO.Offset);
- }
-
- return Align(1);
-}
-
+ return true;
+
+ if (SNaN) {
+ // FP operations quiet. For now, just handle the ones inserted during
+ // legalization.
+ switch (DefMI->getOpcode()) {
+ case TargetOpcode::G_FPEXT:
+ case TargetOpcode::G_FPTRUNC:
+ case TargetOpcode::G_FCANONICALIZE:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
+ const MachinePointerInfo &MPO) {
+ auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
+ if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
+ MPO.Offset);
+ }
+
+ return Align(1);
+}
+
Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
const TargetInstrInfo &TII,
MCRegister PhysReg,
@@ -546,23 +546,23 @@ Register llvm::getFunctionLiveInPhysReg,
return LiveIn;
}
-Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
- uint64_t Imm,
- const MachineRegisterInfo &MRI) {
- auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
- if (MaybeOp1Cst) {
- switch (Opcode) {
- default:
- break;
+Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
+ uint64_t Imm,
+ const MachineRegisterInfo &MRI) {
+ auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+ if (MaybeOp1Cst) {
+ switch (Opcode) {
+ default:
+ break;
case TargetOpcode::G_SEXT_INREG: {
LLT Ty = MRI.getType(Op1);
return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
- }
}
- }
- return None;
-}
-
+ }
+ }
+ return None;
+}
+
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
GISelKnownBits *KB) {
Optional<DefinitionAndSourceRegister> DefSrcReg =
@@ -615,16 +615,16 @@ bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
return (Known.countMaxPopulation() == 1) &&
(Known.countMinPopulation() == 1);
}
-void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
- AU.addPreserved<StackProtector>();
-}
-
+void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
+ AU.addPreserved<StackProtector>();
+}
+
static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
unsigned Mul = OrigSize * TargetSize;
unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
return Mul / GCDSize;
}
-
+
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
const unsigned OrigSize = OrigTy.getSizeInBits();
const unsigned TargetSize = TargetTy.getSizeInBits();
@@ -652,21 +652,21 @@ LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
- }
-
+ }
+
if (TargetTy.isVector()) {
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
return LLT::vector(LCMSize / OrigSize, OrigTy);
- }
-
+ }
+
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
-
+
// Preserve pointer types.
if (LCMSize == OrigSize)
return OrigTy;
if (LCMSize == TargetSize)
return TargetTy;
-
+
return LLT::scalar(LCMSize);
}
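// Editor's sketch (assumed, not from the patch): the scalar fallback of
// getLCMType reduces to the least common multiple of the two bit widths,
// computed exactly as getLCMSize does above.
#include <cstdio>

static unsigned gcdSize(unsigned A, unsigned B) { return B ? gcdSize(B, A % B) : A; }
static unsigned lcmSize(unsigned OrigSize, unsigned TargetSize) {
  return OrigSize * TargetSize / gcdSize(OrigSize, TargetSize);
}

int main() {
  // s48 vs s32 -> s96: the smallest width both 48 and 32 divide evenly.
  std::printf("s%u\n", lcmSize(48, 32)); // prints s96
  return 0;
}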
@@ -701,8 +701,8 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
if (GCD < OrigElt.getSizeInBits())
return LLT::scalar(GCD);
return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
- }
-
+ }
+
if (TargetTy.isVector()) {
// Try to preserve the original element type.
LLT TargetElt = TargetTy.getElementType();
@@ -712,8 +712,8 @@
unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
return LLT::scalar(GCD);
-}
-
+}
+
Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
"Only G_SHUFFLE_VECTOR can have a splat index!");
@@ -752,8 +752,8 @@ static bool isBuildVectorConstantSplat(const MachineInstr &MI,
Register Element = MI.getOperand(I).getReg();
if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
return false;
- }
-
+ }
+
return true;
}
@@ -774,15 +774,15 @@ llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
Scalar = ElementValue;
else if (*Scalar != ElementValue)
return None;
- }
-
+ }
+
return Scalar;
}
-
+
bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
return isBuildVectorConstantSplat(MI, MRI, 0);
-}
+}
bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/ya.make b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/ya.make
index 3906f6de47..e6de0fe8d9 100644
--- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/ya.make
+++ b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/ya.make
@@ -1,17 +1,17 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
OWNER(
orivej
g:cpp-contrib
)
-
+
LICENSE(Apache-2.0 WITH LLVM-exception)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
+PEERDIR(
contrib/libs/llvm12
contrib/libs/llvm12/include
contrib/libs/llvm12/lib/Analysis
@@ -22,41 +22,41 @@ PEERDIR(
contrib/libs/llvm12/lib/Support
contrib/libs/llvm12/lib/Target
contrib/libs/llvm12/lib/Transforms/Utils
-)
-
+)
+
ADDINCL(
contrib/libs/llvm12/lib/CodeGen/GlobalISel
)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-SRCS(
- CSEInfo.cpp
- CSEMIRBuilder.cpp
- CallLowering.cpp
- Combiner.cpp
- CombinerHelper.cpp
- GISelChangeObserver.cpp
- GISelKnownBits.cpp
- GlobalISel.cpp
- IRTranslator.cpp
- InlineAsmLowering.cpp
- InstructionSelect.cpp
- InstructionSelector.cpp
- LegalityPredicates.cpp
- LegalizeMutations.cpp
- Legalizer.cpp
- LegalizerHelper.cpp
- LegalizerInfo.cpp
- Localizer.cpp
- LostDebugLocObserver.cpp
- MachineIRBuilder.cpp
- RegBankSelect.cpp
- RegisterBank.cpp
- RegisterBankInfo.cpp
- Utils.cpp
-)
-
-END()
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ CSEInfo.cpp
+ CSEMIRBuilder.cpp
+ CallLowering.cpp
+ Combiner.cpp
+ CombinerHelper.cpp
+ GISelChangeObserver.cpp
+ GISelKnownBits.cpp
+ GlobalISel.cpp
+ IRTranslator.cpp
+ InlineAsmLowering.cpp
+ InstructionSelect.cpp
+ InstructionSelector.cpp
+ LegalityPredicates.cpp
+ LegalizeMutations.cpp
+ Legalizer.cpp
+ LegalizerHelper.cpp
+ LegalizerInfo.cpp
+ Localizer.cpp
+ LostDebugLocObserver.cpp
+ MachineIRBuilder.cpp
+ RegBankSelect.cpp
+ RegisterBank.cpp
+ RegisterBankInfo.cpp
+ Utils.cpp
+)
+
+END()