author | shadchin <[email protected]> | 2022-02-10 16:44:39 +0300
---|---|---
committer | Daniil Cherednik <[email protected]> | 2022-02-10 16:44:39 +0300
commit | e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree | 64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp
parent | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
Restoring authorship annotation for <[email protected]>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp')
-rw-r--r-- | contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp | 394
1 file changed, 197 insertions(+), 197 deletions(-)
diff --git a/contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp b/contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp
index 173069d36bf..28c8bd0a7de 100644
--- a/contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/TargetLoweringBase.cpp
@@ -135,28 +135,28 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
     setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
 
   // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
-  if (TT.isPPC()) {
+  if (TT.isPPC()) {
     setLibcallName(RTLIB::ADD_F128, "__addkf3");
     setLibcallName(RTLIB::SUB_F128, "__subkf3");
     setLibcallName(RTLIB::MUL_F128, "__mulkf3");
     setLibcallName(RTLIB::DIV_F128, "__divkf3");
-    setLibcallName(RTLIB::POWI_F128, "__powikf2");
+    setLibcallName(RTLIB::POWI_F128, "__powikf2");
     setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
     setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
     setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
     setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
     setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
     setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
-    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
+    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
     setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
     setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
-    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
+    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
     setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
     setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
-    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
+    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
     setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
     setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
-    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
+    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
     setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
     setLibcallName(RTLIB::UNE_F128, "__nekf2");
     setLibcallName(RTLIB::OGE_F128, "__gekf2");
@@ -229,10 +229,10 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
   if (OpVT == MVT::f16) {
     if (RetVT == MVT::f32)
       return FPEXT_F16_F32;
-    if (RetVT == MVT::f64)
-      return FPEXT_F16_F64;
-    if (RetVT == MVT::f128)
-      return FPEXT_F16_F128;
+    if (RetVT == MVT::f64)
+      return FPEXT_F16_F64;
+    if (RetVT == MVT::f128)
+      return FPEXT_F16_F128;
   } else if (OpVT == MVT::f32) {
     if (RetVT == MVT::f64)
       return FPEXT_F32_F64;
@@ -294,15 +294,15 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
 /// UNKNOWN_LIBCALL if there is none.
 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
-  if (OpVT == MVT::f16) {
+  if (OpVT == MVT::f16) {
+    if (RetVT == MVT::i32)
+      return FPTOSINT_F16_I32;
+    if (RetVT == MVT::i64)
+      return FPTOSINT_F16_I64;
+    if (RetVT == MVT::i128)
+      return FPTOSINT_F16_I128;
+  } else if (OpVT == MVT::f32) {
     if (RetVT == MVT::i32)
-      return FPTOSINT_F16_I32;
-    if (RetVT == MVT::i64)
-      return FPTOSINT_F16_I64;
-    if (RetVT == MVT::i128)
-      return FPTOSINT_F16_I128;
-  } else if (OpVT == MVT::f32) {
-    if (RetVT == MVT::i32)
       return FPTOSINT_F32_I32;
     if (RetVT == MVT::i64)
       return FPTOSINT_F32_I64;
@@ -343,15 +343,15 @@ RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
 /// UNKNOWN_LIBCALL if there is none.
 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
-  if (OpVT == MVT::f16) {
+  if (OpVT == MVT::f16) {
+    if (RetVT == MVT::i32)
+      return FPTOUINT_F16_I32;
+    if (RetVT == MVT::i64)
+      return FPTOUINT_F16_I64;
+    if (RetVT == MVT::i128)
+      return FPTOUINT_F16_I128;
+  } else if (OpVT == MVT::f32) {
     if (RetVT == MVT::i32)
-      return FPTOUINT_F16_I32;
-    if (RetVT == MVT::i64)
-      return FPTOUINT_F16_I64;
-    if (RetVT == MVT::i128)
-      return FPTOUINT_F16_I128;
-  } else if (OpVT == MVT::f32) {
-    if (RetVT == MVT::i32)
       return FPTOUINT_F32_I32;
     if (RetVT == MVT::i64)
       return FPTOUINT_F32_I64;
@@ -393,8 +393,8 @@ RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
 /// UNKNOWN_LIBCALL if there is none.
 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
   if (OpVT == MVT::i32) {
-    if (RetVT == MVT::f16)
-      return SINTTOFP_I32_F16;
+    if (RetVT == MVT::f16)
+      return SINTTOFP_I32_F16;
     if (RetVT == MVT::f32)
       return SINTTOFP_I32_F32;
     if (RetVT == MVT::f64)
@@ -406,8 +406,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
     if (RetVT == MVT::ppcf128)
       return SINTTOFP_I32_PPCF128;
   } else if (OpVT == MVT::i64) {
-    if (RetVT == MVT::f16)
-      return SINTTOFP_I64_F16;
+    if (RetVT == MVT::f16)
+      return SINTTOFP_I64_F16;
     if (RetVT == MVT::f32)
       return SINTTOFP_I64_F32;
     if (RetVT == MVT::f64)
@@ -419,8 +419,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
     if (RetVT == MVT::ppcf128)
       return SINTTOFP_I64_PPCF128;
   } else if (OpVT == MVT::i128) {
-    if (RetVT == MVT::f16)
-      return SINTTOFP_I128_F16;
+    if (RetVT == MVT::f16)
+      return SINTTOFP_I128_F16;
     if (RetVT == MVT::f32)
       return SINTTOFP_I128_F32;
     if (RetVT == MVT::f64)
@@ -439,8 +439,8 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
 /// UNKNOWN_LIBCALL if there is none.
 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
   if (OpVT == MVT::i32) {
-    if (RetVT == MVT::f16)
-      return UINTTOFP_I32_F16;
+    if (RetVT == MVT::f16)
+      return UINTTOFP_I32_F16;
     if (RetVT == MVT::f32)
       return UINTTOFP_I32_F32;
     if (RetVT == MVT::f64)
@@ -452,8 +452,8 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
     if (RetVT == MVT::ppcf128)
       return UINTTOFP_I32_PPCF128;
   } else if (OpVT == MVT::i64) {
-    if (RetVT == MVT::f16)
-      return UINTTOFP_I64_F16;
+    if (RetVT == MVT::f16)
+      return UINTTOFP_I64_F16;
     if (RetVT == MVT::f32)
       return UINTTOFP_I64_F32;
     if (RetVT == MVT::f64)
@@ -465,8 +465,8 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
     if (RetVT == MVT::ppcf128)
       return UINTTOFP_I64_PPCF128;
   } else if (OpVT == MVT::i128) {
-    if (RetVT == MVT::f16)
-      return UINTTOFP_I128_F16;
+    if (RetVT == MVT::f16)
+      return UINTTOFP_I128_F16;
     if (RetVT == MVT::f32)
       return UINTTOFP_I128_F32;
     if (RetVT == MVT::f64)
@@ -481,83 +481,83 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
   return UNKNOWN_LIBCALL;
 }
 
-RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
-                                        MVT VT) {
-  unsigned ModeN, ModelN;
-  switch (VT.SimpleTy) {
-  case MVT::i8:
-    ModeN = 0;
-    break;
-  case MVT::i16:
-    ModeN = 1;
-    break;
-  case MVT::i32:
-    ModeN = 2;
-    break;
-  case MVT::i64:
-    ModeN = 3;
-    break;
-  case MVT::i128:
-    ModeN = 4;
-    break;
-  default:
-    return UNKNOWN_LIBCALL;
-  }
-
-  switch (Order) {
-  case AtomicOrdering::Monotonic:
-    ModelN = 0;
-    break;
-  case AtomicOrdering::Acquire:
-    ModelN = 1;
-    break;
-  case AtomicOrdering::Release:
-    ModelN = 2;
-    break;
-  case AtomicOrdering::AcquireRelease:
-  case AtomicOrdering::SequentiallyConsistent:
-    ModelN = 3;
-    break;
-  default:
-    return UNKNOWN_LIBCALL;
-  }
-
-#define LCALLS(A, B) \
-  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
-#define LCALL5(A) \
-  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
-  switch (Opc) {
-  case ISD::ATOMIC_CMP_SWAP: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
-    return LC[ModeN][ModelN];
-  }
-  case ISD::ATOMIC_SWAP: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
-    return LC[ModeN][ModelN];
-  }
-  case ISD::ATOMIC_LOAD_ADD: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
-    return LC[ModeN][ModelN];
-  }
-  case ISD::ATOMIC_LOAD_OR: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
-    return LC[ModeN][ModelN];
-  }
-  case ISD::ATOMIC_LOAD_CLR: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
-    return LC[ModeN][ModelN];
-  }
-  case ISD::ATOMIC_LOAD_XOR: {
-    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
-    return LC[ModeN][ModelN];
-  }
-  default:
-    return UNKNOWN_LIBCALL;
-  }
-#undef LCALLS
-#undef LCALL5
-}
-
+RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
+                                        MVT VT) {
+  unsigned ModeN, ModelN;
+  switch (VT.SimpleTy) {
+  case MVT::i8:
+    ModeN = 0;
+    break;
+  case MVT::i16:
+    ModeN = 1;
+    break;
+  case MVT::i32:
+    ModeN = 2;
+    break;
+  case MVT::i64:
+    ModeN = 3;
+    break;
+  case MVT::i128:
+    ModeN = 4;
+    break;
+  default:
+    return UNKNOWN_LIBCALL;
+  }
+
+  switch (Order) {
+  case AtomicOrdering::Monotonic:
+    ModelN = 0;
+    break;
+  case AtomicOrdering::Acquire:
+    ModelN = 1;
+    break;
+  case AtomicOrdering::Release:
+    ModelN = 2;
+    break;
+  case AtomicOrdering::AcquireRelease:
+  case AtomicOrdering::SequentiallyConsistent:
+    ModelN = 3;
+    break;
+  default:
+    return UNKNOWN_LIBCALL;
+  }
+
+#define LCALLS(A, B) \
+  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
+#define LCALL5(A) \
+  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
+  switch (Opc) {
+  case ISD::ATOMIC_CMP_SWAP: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
+    return LC[ModeN][ModelN];
+  }
+  case ISD::ATOMIC_SWAP: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
+    return LC[ModeN][ModelN];
+  }
+  case ISD::ATOMIC_LOAD_ADD: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
+    return LC[ModeN][ModelN];
+  }
+  case ISD::ATOMIC_LOAD_OR: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
+    return LC[ModeN][ModelN];
+  }
+  case ISD::ATOMIC_LOAD_CLR: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
+    return LC[ModeN][ModelN];
+  }
+  case ISD::ATOMIC_LOAD_XOR: {
+    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
+    return LC[ModeN][ModelN];
+  }
+  default:
+    return UNKNOWN_LIBCALL;
+  }
+#undef LCALLS
+#undef LCALL5
+}
+
 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
 #define OP_TO_LIBCALL(Name, Enum) \
   case Name: \
@@ -727,7 +727,7 @@ void TargetLoweringBase::initActions() {
             std::end(TargetDAGCombineArray), 0);
 
   for (MVT VT : MVT::fp_valuetypes()) {
-    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
+    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
     if (IntVT.isValid()) {
       setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
       AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
@@ -769,8 +769,8 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::UADDSAT, VT, Expand);
     setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
-    setOperationAction(ISD::SSHLSAT, VT, Expand);
-    setOperationAction(ISD::USHLSAT, VT, Expand);
+    setOperationAction(ISD::SSHLSAT, VT, Expand);
+    setOperationAction(ISD::USHLSAT, VT, Expand);
     setOperationAction(ISD::SMULFIX, VT, Expand);
     setOperationAction(ISD::SMULFIXSAT, VT, Expand);
     setOperationAction(ISD::UMULFIX, VT, Expand);
@@ -779,8 +779,8 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
     setOperationAction(ISD::UDIVFIX, VT, Expand);
     setOperationAction(ISD::UDIVFIXSAT, VT, Expand);
-    setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
-    setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);
+    setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
+    setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);
 
     // Overflow operations default to expand
     setOperationAction(ISD::SADDO, VT, Expand);
@@ -794,8 +794,8 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::ADDCARRY, VT, Expand);
     setOperationAction(ISD::SUBCARRY, VT, Expand);
     setOperationAction(ISD::SETCCCARRY, VT, Expand);
-    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
-    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);
+    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
+    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);
 
     // ADDC/ADDE/SUBC/SUBE default to expand.
     setOperationAction(ISD::ADDC, VT, Expand);
@@ -808,7 +808,7 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
     setOperationAction(ISD::BITREVERSE, VT, Expand);
-    setOperationAction(ISD::PARITY, VT, Expand);
+    setOperationAction(ISD::PARITY, VT, Expand);
 
     // These library functions default to expand.
     setOperationAction(ISD::FROUND, VT, Expand);
@@ -847,8 +847,8 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
     setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
     setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
+    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
+    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
   }
 
   // Most targets ignore the @llvm.prefetch intrinsic.
@@ -893,8 +893,8 @@ void TargetLoweringBase::initActions() {
   // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
   // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
-
-  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
+
+  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
 }
 
 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
@@ -924,11 +924,11 @@ bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
   }
 }
 
-bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
-                                             unsigned DestAS) const {
-  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
-}
-
+bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
+                                             unsigned DestAS) const {
+  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
+}
+
 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
   // If the command-line option was specified, ignore this request.
   if (!JumpIsExpensiveOverride.getNumOccurrences())
@@ -951,7 +951,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
            "Promote may not follow Expand or Promote");
 
     if (LA == TypeSplitVector)
-      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
+      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
     if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
@@ -982,10 +982,10 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
   EVT EltVT = VT.getVectorElementType();
 
   // Vectors with only one element are always scalarized.
-  if (NumElts.isScalar())
+  if (NumElts.isScalar())
     return LegalizeKind(TypeScalarizeVector, EltVT);
 
-  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
+  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
     report_fatal_error("Cannot legalize this vector");
 
   // Try to widen vector elements until the element type is a power of two and
@@ -995,7 +995,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
     // Vectors with a number of elements that is not a power of two are always
     // widened, for example <3 x i8> -> <4 x i8>.
     if (!VT.isPow2VectorType()) {
-      NumElts = NumElts.coefficientNextPowerOf2();
+      NumElts = NumElts.coefficientNextPowerOf2();
       EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
       return LegalizeKind(TypeWidenVector, NVT);
     }
@@ -1007,7 +1007,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
-                          VT.getHalfNumVectorElementsVT(Context));
+                          VT.getHalfNumVectorElementsVT(Context));
 
     // Promote the integer element types until a legal vector type is found
     // or until the element integer type is too big. If a legal type was not
@@ -1044,7 +1044,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
   // If there is no wider legal type, split the vector.
   while (true) {
     // Round up to the next power of 2.
-    NumElts = NumElts.coefficientNextPowerOf2();
+    NumElts = NumElts.coefficientNextPowerOf2();
 
     // If there is no simple vector type with this many elements then there
     // cannot be a larger legal vector type.  Note that this assumes that
@@ -1067,8 +1067,8 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
   }
 
   // Vectors with illegal element types are expanded.
-  EVT NVT = EVT::getVectorVT(Context, EltVT,
-                             VT.getVectorElementCount().divideCoefficientBy(2));
+  EVT NVT = EVT::getVectorVT(Context, EltVT,
+                             VT.getVectorElementCount().divideCoefficientBy(2));
   return LegalizeKind(TypeSplitVector, NVT);
 }
 
@@ -1084,24 +1084,24 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
 
   // Scalable vectors cannot be scalarized, so splitting or widening is
   // required.
-  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
+  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
     llvm_unreachable(
         "Splitting or widening of non-power-of-2 MVTs is not implemented.");
 
   // FIXME: We don't support non-power-of-2-sized vectors for now.
   // Ideally we could break down into LHS/RHS like LegalizeDAG does.
-  if (!isPowerOf2_32(EC.getKnownMinValue())) {
+  if (!isPowerOf2_32(EC.getKnownMinValue())) {
     // Split EC to unit size (scalable property is preserved).
-    NumVectorRegs = EC.getKnownMinValue();
-    EC = ElementCount::getFixed(1);
+    NumVectorRegs = EC.getKnownMinValue();
+    EC = ElementCount::getFixed(1);
   }
 
   // Divide the input until we get to a supported size. This will
   // always end up with an EC that represent a scalar or a scalable
   // scalar.
-  while (EC.getKnownMinValue() > 1 &&
-         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
-    EC = EC.divideCoefficientBy(2);
+  while (EC.getKnownMinValue() > 1 &&
+         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
+    EC = EC.divideCoefficientBy(2);
     NumVectorRegs <<= 1;
   }
 
@@ -1112,7 +1112,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
     NewVT = EltTy;
   IntermediateVT = NewVT;
 
-  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
+  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
 
   // Convert sizes such as i33 to i64.
   if (!isPowerOf2_32(LaneSizeInBits))
@@ -1121,7 +1121,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
   MVT DestVT = TLI->getRegisterType(NewVT);
   RegisterVT = DestVT;
   if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
-    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());
+    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());
 
   // Otherwise, promotion or legal types use the same number of registers as
   // the vector decimated to the appropriate level.
@@ -1168,19 +1168,19 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
   // Inherit previous memory operands.
   MIB.cloneMemRefs(*MI);
 
-  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
-    MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
+    MachineOperand &MO = MI->getOperand(i);
     if (!MO.isFI()) {
-      // Index of Def operand this Use it tied to.
-      // Since Defs are coming before Uses, if Use is tied, then
-      // index of Def must be smaller that index of that Use.
-      // Also, Defs preserve their position in new MI.
-      unsigned TiedTo = i;
-      if (MO.isReg() && MO.isTied())
-        TiedTo = MI->findTiedOperandIdx(i);
+      // Index of Def operand this Use it tied to.
+      // Since Defs are coming before Uses, if Use is tied, then
+      // index of Def must be smaller that index of that Use.
+      // Also, Defs preserve their position in new MI.
+      unsigned TiedTo = i;
+      if (MO.isReg() && MO.isTied())
+        TiedTo = MI->findTiedOperandIdx(i);
       MIB.add(MO);
-      if (TiedTo < i)
-        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
+      if (TiedTo < i)
+        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
       continue;
     }
 
@@ -1389,7 +1389,7 @@ void TargetLoweringBase::computeRegisterProperties(
         MVT SVT = (MVT::SimpleValueType) nVT;
         // Promote vectors of integers to vectors with the same number
         // of elements, with a wider element type.
-        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
+        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
@@ -1405,15 +1405,15 @@ void TargetLoweringBase::computeRegisterProperties(
     }
 
     case TypeWidenVector:
-      if (isPowerOf2_32(EC.getKnownMinValue())) {
+      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
-              SVT.getVectorElementCount().getKnownMinValue() >
-                  EC.getKnownMinValue() &&
-              isTypeLegal(SVT)) {
+              SVT.getVectorElementCount().getKnownMinValue() >
+                  EC.getKnownMinValue() &&
+              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
@@ -1457,10 +1457,10 @@ void TargetLoweringBase::computeRegisterProperties(
        ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
      else if (PreferredAction == TypeSplitVector)
        ValueTypeActions.setTypeAction(VT, TypeSplitVector);
-      else if (EC.getKnownMinValue() > 1)
+      else if (EC.getKnownMinValue() > 1)
        ValueTypeActions.setTypeAction(VT, TypeSplitVector);
      else
-        ValueTypeActions.setTypeAction(VT, EC.isScalable()
+        ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                               ? TypeScalarizeScalableVector
                                               : TypeScalarizeVector);
    } else {
@@ -1518,8 +1518,8 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
   // This handles things like <2 x float> -> <4 x float> and
   // <4 x i1> -> <4 x i32>.
   LegalizeTypeAction TA = getTypeAction(Context, VT);
-  if (EltCnt.getKnownMinValue() != 1 &&
-      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
+  if (EltCnt.getKnownMinValue() != 1 &&
+      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
     EVT RegisterEVT = getTypeToTransformTo(Context, VT);
     if (isTypeLegal(RegisterEVT)) {
       IntermediateVT = RegisterEVT;
@@ -1536,7 +1536,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 
   // Scalable vectors cannot be scalarized, so handle the legalisation of the
   // types like done elsewhere in SelectionDAG.
-  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
+  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
     LegalizeKind LK;
     EVT PartVT = VT;
     do {
@@ -1545,15 +1545,15 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
       PartVT = LK.second;
     } while (LK.first != TypeLegal);
 
-    NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
-                       PartVT.getVectorElementCount().getKnownMinValue();
+    NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
+                       PartVT.getVectorElementCount().getKnownMinValue();
 
     // FIXME: This code needs to be extended to handle more complex vector
     // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
     // supported cases are vectors that are broken down into equal parts
     // such as nxv6i64 -> 3 x nxv2i64.
-    assert((PartVT.getVectorElementCount() * NumIntermediates) ==
-               VT.getVectorElementCount() &&
+    assert((PartVT.getVectorElementCount() * NumIntermediates) ==
+               VT.getVectorElementCount() &&
           "Expected an integer multiple of PartVT");
     IntermediateVT = PartVT;
     RegisterVT = getRegisterType(Context, IntermediateVT);
@@ -1562,16 +1562,16 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 
   // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally
   // we could break down into LHS/RHS like LegalizeDAG does.
-  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
-    NumVectorRegs = EltCnt.getKnownMinValue();
-    EltCnt = ElementCount::getFixed(1);
+  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
+    NumVectorRegs = EltCnt.getKnownMinValue();
+    EltCnt = ElementCount::getFixed(1);
   }
 
   // Divide the input until we get to a supported size. This will always
   // end with a scalar if the target doesn't support vectors.
-  while (EltCnt.getKnownMinValue() > 1 &&
+  while (EltCnt.getKnownMinValue() > 1 &&
          !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
-    EltCnt = EltCnt.divideCoefficientBy(2);
+    EltCnt = EltCnt.divideCoefficientBy(2);
     NumVectorRegs <<= 1;
   }
 
@@ -1589,7 +1589,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
   TypeSize NewVTSize = NewVT.getSizeInBits();
   // Convert sizes such as i33 to i64.
   if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
-    NewVTSize = NewVTSize.coefficientNextPowerOf2();
+    NewVTSize = NewVTSize.coefficientNextPowerOf2();
   return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
 }
 
@@ -1726,14 +1726,14 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                            MMO.getFlags(), Fast);
 }
 
-bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
-                                            const DataLayout &DL, LLT Ty,
-                                            const MachineMemOperand &MMO,
-                                            bool *Fast) const {
-  return allowsMemoryAccess(Context, DL, getMVTForLLT(Ty), MMO.getAddrSpace(),
-                            MMO.getAlign(), MMO.getFlags(), Fast);
-}
-
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+                                            const DataLayout &DL, LLT Ty,
+                                            const MachineMemOperand &MMO,
+                                            bool *Fast) const {
+  return allowsMemoryAccess(Context, DL, getMVTForLLT(Ty), MMO.getAddrSpace(),
+                            MMO.getAlign(), MMO.getFlags(), Fast);
+}
+
 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
   return BranchProbability(MinPercentageForPredictableBranch, 100);
 }
@@ -1956,14 +1956,14 @@ Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
 // Currently only support "standard" __stack_chk_guard.
 // TODO: add LOAD_STACK_GUARD support.
 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
-  if (!M.getNamedValue("__stack_chk_guard")) {
-    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
-                                  GlobalVariable::ExternalLinkage, nullptr,
-                                  "__stack_chk_guard");
-    if (TM.getRelocationModel() == Reloc::Static &&
-        !TM.getTargetTriple().isWindowsGNUEnvironment())
-      GV->setDSOLocal(true);
-  }
+  if (!M.getNamedValue("__stack_chk_guard")) {
+    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
+                                  GlobalVariable::ExternalLinkage, nullptr,
+                                  "__stack_chk_guard");
+    if (TM.getRelocationModel() == Reloc::Static &&
+        !TM.getTargetTriple().isWindowsGNUEnvironment())
+      GV->setDSOLocal(true);
+  }
 }
 
 // Currently only support "standard" __stack_chk_guard.
@@ -2047,7 +2047,7 @@ static bool parseRefinementStep(StringRef In, size_t &Position,
   // step parameter.
   if (RefStepString.size() == 1) {
     char RefStepChar = RefStepString[0];
-    if (isDigit(RefStepChar)) {
+    if (isDigit(RefStepChar)) {
      Value = RefStepChar - '0';
      return true;
    }