| Field | Value | Date |
|---|---|---|
| author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:30 +0300 |
| committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:30 +0300 |
| commit | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch) | |
| tree | 012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp | |
| parent | 6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff) | |
| download | ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz | |
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp | 64 |
1 file changed, 32 insertions, 32 deletions
```diff
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 0f8b1d6584..7b05f70a73 100644
--- a/contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/contrib/libs/llvm12/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -52,10 +52,10 @@ AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
   : CallLowering(&TLI) {}
 
 namespace {
-struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
+struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
   IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                      CCAssignFn *AssignFn)
-      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
+      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
 
   Register getStackAddress(uint64_t Size, int64_t Offset,
                            MachinePointerInfo &MPO) override {
@@ -101,7 +101,7 @@ struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
   /// How the physical register gets marked varies between formal
   /// parameters (it's a basic-block live-in), and a call instruction
   /// (it's an implicit-def of the BL).
-  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
+  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
 
   uint64_t StackUsed;
 };
@@ -111,7 +111,7 @@ struct FormalArgHandler : public IncomingArgHandler {
                    CCAssignFn *AssignFn)
     : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}
 
-  void markPhysRegUsed(MCRegister PhysReg) override {
+  void markPhysRegUsed(MCRegister PhysReg) override {
     MIRBuilder.getMRI()->addLiveIn(PhysReg);
     MIRBuilder.getMBB().addLiveIn(PhysReg);
   }
@@ -122,19 +122,19 @@ struct CallReturnHandler : public IncomingArgHandler {
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn)
     : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
 
-  void markPhysRegUsed(MCRegister PhysReg) override {
+  void markPhysRegUsed(MCRegister PhysReg) override {
     MIB.addDef(PhysReg, RegState::Implicit);
   }
 
   MachineInstrBuilder MIB;
 };
 
-struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
+struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
   OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                      MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                      CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
                      int FPDiff = 0)
-      : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
+      : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
         AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
         StackSize(0), SPReg(0) {}
 
@@ -187,8 +187,8 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
     if (!Arg.IsFixed)
       MaxSize = 0;
 
-    assert(Arg.Regs.size() == 1);
-
+    assert(Arg.Regs.size() == 1);
+
     Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                            ? extendRegister(Arg.Regs[0], VA, MaxSize)
                            : Arg.Regs[0];
@@ -274,7 +274,7 @@ void AArch64CallLowering::splitToValueTypes(
 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
                                       ArrayRef<Register> VRegs,
-                                      FunctionLoweringInfo &FLI,
+                                      FunctionLoweringInfo &FLI,
                                       Register SwiftErrorVReg) const {
   auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
@@ -420,7 +420,7 @@ static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
   // Conservatively forward X8, since it might be used for an aggregate
   // return.
   if (!CCInfo.isAllocated(AArch64::X8)) {
-    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
+    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
     Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
   }
 
@@ -441,7 +441,7 @@ bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const {
 
 bool AArch64CallLowering::lowerFormalArguments(
     MachineIRBuilder &MIRBuilder, const Function &F,
-    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
+    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
   MachineFunction &MF = MIRBuilder.getMF();
   MachineBasicBlock &MBB = MIRBuilder.getMBB();
   MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -623,25 +623,25 @@ bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
   const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
-  if (Info.IsVarArg) {
-    // Be conservative and disallow variadic memory operands to match SDAG's
-    // behaviour.
-    // FIXME: If the caller's calling convention is C, then we can
-    // potentially use its argument area. However, for cases like fastcc,
-    // we can't do anything.
-    for (unsigned i = 0; i < OutLocs.size(); ++i) {
-      auto &ArgLoc = OutLocs[i];
-      if (ArgLoc.isRegLoc())
-        continue;
+  if (Info.IsVarArg) {
+    // Be conservative and disallow variadic memory operands to match SDAG's
+    // behaviour.
+    // FIXME: If the caller's calling convention is C, then we can
+    // potentially use its argument area. However, for cases like fastcc,
+    // we can't do anything.
+    for (unsigned i = 0; i < OutLocs.size(); ++i) {
+      auto &ArgLoc = OutLocs[i];
+      if (ArgLoc.isRegLoc())
+        continue;
 
       LLVM_DEBUG(
           dbgs()
-          << "... Cannot tail call vararg function with stack arguments\n");
+          << "... Cannot tail call vararg function with stack arguments\n");
       return false;
     }
   }
 
-  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
+  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
 }
 
 bool AArch64CallLowering::isEligibleForTailCallOptimization(
@@ -756,7 +756,7 @@ static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
 
   // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
   // x16 or x17.
-  if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
+  if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
     return AArch64::TCRETURNriBTI;
 
   return AArch64::TCRETURNri;
@@ -776,7 +776,7 @@ bool AArch64CallLowering::lowerTailCall(
   // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
   // register class. Until we can do that, we should fall back here.
-  if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
+  if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
     LLVM_DEBUG(
         dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
     return false;
   }
@@ -894,9 +894,9 @@ bool AArch64CallLowering::lowerTailCall(
   // If Callee is a reg, since it is used by a target specific instruction,
   // it must have a register class matching the constraint of that instruction.
   if (Info.Callee.isReg())
-    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
-                             *MF.getSubtarget().getRegBankInfo(), *MIB,
-                             MIB->getDesc(), Info.Callee, 0);
+    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
+                             *MF.getSubtarget().getRegBankInfo(), *MIB,
+                             MIB->getDesc(), Info.Callee, 0);
 
   MF.getFrameInfo().setHasTailCall();
   Info.LoweredTailCall = true;
@@ -978,9 +978,9 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   // instruction, it must have a register class matching the
   // constraint of that instruction.
   if (Info.Callee.isReg())
-    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
-                             *MF.getSubtarget().getRegBankInfo(), *MIB,
-                             MIB->getDesc(), Info.Callee, 0);
+    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
+                             *MF.getSubtarget().getRegBankInfo(), *MIB,
+                             MIB->getDesc(), Info.Callee, 0);
 
   // Finally we can copy the returned value back into its virtual-register. In
   // symmetry with the arguments, the physical register must be an
```
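For orientation: the commit itself only restores authorship annotations, so every hunk above is a whitespace-level change, but the code it touches is the handler machinery that `AArch64CallLowering` uses to assign arguments and return values. On the incoming side the shared assignment logic lives in a base handler and only the policy for marking a consumed physical register is virtual, exactly as the `markPhysRegUsed` comment in the diff says: formal parameters mark the register as a basic-block live-in, while call returns mark it as an implicit def of the call instruction. Below is a minimal, self-contained C++ sketch of that shape (not the LLVM code itself); `MockMBB` and `MockCallInst` are hypothetical stand-ins for the real `MachineBasicBlock`/`MachineInstrBuilder` APIs.

```cpp
#include <iostream>
#include <vector>

// Hypothetical stand-ins for the block and the call instruction; the real
// handlers use MIRBuilder.getMBB() and a MachineInstrBuilder instead.
struct MockMBB {
  std::vector<unsigned> LiveIns;
  void addLiveIn(unsigned Reg) { LiveIns.push_back(Reg); }
};

struct MockCallInst {
  std::vector<unsigned> ImplicitDefs;
  void addDef(unsigned Reg) { ImplicitDefs.push_back(Reg); }
};

// Mirrors CallLowering::IncomingValueHandler: common assignment logic in the
// base class, only the register-marking policy is virtual.
struct IncomingArgHandlerSketch {
  virtual ~IncomingArgHandlerSketch() = default;

  void assignValueToReg(unsigned PhysReg) {
    // ... the copy from PhysReg into a virtual register would happen here ...
    markPhysRegUsed(PhysReg);
  }

  // Formal parameters mark a basic-block live-in; call returns mark an
  // implicit def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

// Formal arguments: the register is live into the entry block.
struct FormalArgHandlerSketch : IncomingArgHandlerSketch {
  explicit FormalArgHandlerSketch(MockMBB &MBB) : MBB(MBB) {}
  void markPhysRegUsed(unsigned PhysReg) override { MBB.addLiveIn(PhysReg); }
  MockMBB &MBB;
};

// Call returns: the register is an implicit def of the call instruction.
struct CallReturnHandlerSketch : IncomingArgHandlerSketch {
  explicit CallReturnHandlerSketch(MockCallInst &MIB) : MIB(MIB) {}
  void markPhysRegUsed(unsigned PhysReg) override { MIB.addDef(PhysReg); }
  MockCallInst &MIB;
};

int main() {
  MockMBB EntryBlock;
  MockCallInst Call;

  FormalArgHandlerSketch Formal(EntryBlock);
  CallReturnHandlerSketch Ret(Call);

  Formal.assignValueToReg(/*PhysReg=*/0); // a formal argument arriving in, say, X0
  Ret.assignValueToReg(/*PhysReg=*/0);    // a return value coming back in the same register

  std::cout << "entry live-ins: " << EntryBlock.LiveIns.size()
            << ", call implicit defs: " << Call.ImplicitDefs.size() << "\n";
}
```

The outgoing side visible in the diff (`OutgoingArgHandler : public CallLowering::OutgoingValueHandler`) follows the same pattern: one handler type parameterized by the calling-convention assignment functions, reused for both regular and tail calls.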