path: root/contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp
author    shadchin <shadchin@yandex-team.ru> 2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru> 2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp  100
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp b/contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp
index c51dd48cab..ab1a31e1e7 100644
--- a/contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/contrib/libs/llvm12/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -42,51 +42,51 @@ static const MCPhysReg ZRegList[] = {AArch64::Z0, AArch64::Z1, AArch64::Z2,
static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
CCState &State, Align SlotAlign) {
- if (LocVT.isScalableVector()) {
- const AArch64Subtarget &Subtarget = static_cast<const AArch64Subtarget &>(
- State.getMachineFunction().getSubtarget());
- const AArch64TargetLowering *TLI = Subtarget.getTargetLowering();
-
- // We are about to reinvoke the CCAssignFn auto-generated handler. If we
- // don't unset these flags we will get stuck in an infinite loop forever
- // invoking the custom handler.
- ArgFlags.setInConsecutiveRegs(false);
- ArgFlags.setInConsecutiveRegsLast(false);
-
- // The calling convention for passing SVE tuples states that in the event
- // we cannot allocate enough registers for the tuple we should still leave
- // any remaining registers unallocated. However, when we call the
- // CCAssignFn again we want it to behave as if all remaining registers are
- // allocated. This will force the code to pass the tuple indirectly in
- // accordance with the PCS.
- bool RegsAllocated[8];
- for (int I = 0; I < 8; I++) {
- RegsAllocated[I] = State.isAllocated(ZRegList[I]);
- State.AllocateReg(ZRegList[I]);
- }
-
- auto &It = PendingMembers[0];
- CCAssignFn *AssignFn =
- TLI->CCAssignFnForCall(State.getCallingConv(), /*IsVarArg=*/false);
- if (AssignFn(It.getValNo(), It.getValVT(), It.getValVT(), CCValAssign::Full,
- ArgFlags, State))
- llvm_unreachable("Call operand has unhandled type");
-
- // Return the flags to how they were before.
- ArgFlags.setInConsecutiveRegs(true);
- ArgFlags.setInConsecutiveRegsLast(true);
-
- // Return the register state back to how it was before, leaving any
- // unallocated registers available for other smaller types.
- for (int I = 0; I < 8; I++)
- if (!RegsAllocated[I])
- State.DeallocateReg(ZRegList[I]);
-
- // All pending members have now been allocated
- PendingMembers.clear();
- return true;
- }
-
+ if (LocVT.isScalableVector()) {
+ const AArch64Subtarget &Subtarget = static_cast<const AArch64Subtarget &>(
+ State.getMachineFunction().getSubtarget());
+ const AArch64TargetLowering *TLI = Subtarget.getTargetLowering();
+
+ // We are about to reinvoke the CCAssignFn auto-generated handler. If we
+ // don't unset these flags we will get stuck in an infinite loop forever
+ // invoking the custom handler.
+ ArgFlags.setInConsecutiveRegs(false);
+ ArgFlags.setInConsecutiveRegsLast(false);
+
+ // The calling convention for passing SVE tuples states that in the event
+ // we cannot allocate enough registers for the tuple we should still leave
+ // any remaining registers unallocated. However, when we call the
+ // CCAssignFn again we want it to behave as if all remaining registers are
+ // allocated. This will force the code to pass the tuple indirectly in
+ // accordance with the PCS.
+ bool RegsAllocated[8];
+ for (int I = 0; I < 8; I++) {
+ RegsAllocated[I] = State.isAllocated(ZRegList[I]);
+ State.AllocateReg(ZRegList[I]);
+ }
+
+ auto &It = PendingMembers[0];
+ CCAssignFn *AssignFn =
+ TLI->CCAssignFnForCall(State.getCallingConv(), /*IsVarArg=*/false);
+ if (AssignFn(It.getValNo(), It.getValVT(), It.getValVT(), CCValAssign::Full,
+ ArgFlags, State))
+ llvm_unreachable("Call operand has unhandled type");
+
+ // Return the flags to how they were before.
+ ArgFlags.setInConsecutiveRegs(true);
+ ArgFlags.setInConsecutiveRegsLast(true);
+
+ // Return the register state back to how it was before, leaving any
+ // unallocated registers available for other smaller types.
+ for (int I = 0; I < 8; I++)
+ if (!RegsAllocated[I])
+ State.DeallocateReg(ZRegList[I]);
+
+ // All pending members have now been allocated
+ PendingMembers.clear();
+ return true;
+ }
+
unsigned Size = LocVT.getSizeInBits() / 8;
const Align StackAlign =
State.getMachineFunction().getDataLayout().getStackAlignment();
@@ -191,11 +191,11 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
return true;
}
- if (!LocVT.isScalableVector()) {
- // Mark all regs in the class as unavailable
- for (auto Reg : RegList)
- State.AllocateReg(Reg);
- }
+ if (!LocVT.isScalableVector()) {
+ // Mark all regs in the class as unavailable
+ for (auto Reg : RegList)
+ State.AllocateReg(Reg);
+ }
const Align SlotAlign = Subtarget.isTargetDarwin() ? Align(1) : Align(8);
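
Note on the scalable-vector hunk above: the code records which Z registers were already allocated, temporarily allocates all of them so the reinvoked CCAssignFn has no registers left and must pass the SVE tuple indirectly per the PCS, and afterwards releases only the registers it grabbed itself. A minimal standalone sketch of that save/allocate/reinvoke/restore pattern follows; RegState and tryAssign are hypothetical stand-ins for illustration, not the real CCState/CCAssignFn API.

#include <array>
#include <cstddef>

// Hypothetical stand-in for CCState's register bookkeeping (not LLVM API).
struct RegState {
  std::array<bool, 8> Allocated{};  // one flag per Z register in ZRegList
  bool isAllocated(std::size_t I) const { return Allocated[I]; }
  void allocate(std::size_t I) { Allocated[I] = true; }
  void deallocate(std::size_t I) { Allocated[I] = false; }
};

// Stand-in for the auto-generated assign handler: with every register marked
// allocated it has no choice but to take the indirect (stack) path.
static bool tryAssign(RegState & /*State*/) { return true; }

// Force indirect passing, then restore the original allocation state so any
// registers that were free before stay free for later, smaller arguments.
bool assignTupleIndirectly(RegState &State) {
  std::array<bool, 8> WasAllocated{};
  for (std::size_t I = 0; I < 8; ++I) {
    WasAllocated[I] = State.isAllocated(I);  // remember prior state
    State.allocate(I);                       // pretend every reg is taken
  }

  bool Ok = tryAssign(State);  // reinvoke the handler with all regs "taken"

  for (std::size_t I = 0; I < 8; ++I)
    if (!WasAllocated[I])
      State.deallocate(I);     // release only what this function grabbed
  return Ok;
}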