path: root/contrib/libs/llvm12/lib/CodeGen/SelectionDAG
author     shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree       012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/CodeGen/SelectionDAG
parent     6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/CodeGen/SelectionDAG')
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/DAGCombiner.cpp                  3020
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FastISel.cpp                      142
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp          8
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.cpp                  202
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.h                    22
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp                   882
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp            432
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp          680
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp                 20
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.h                   84
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp          12
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp             46
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp           1334
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp               2
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp             20
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp            52
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAG.cpp                  1186
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp   42
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp           932
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h             16
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp            100
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp              212
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/StatepointLowering.cpp            492
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/TargetLowering.cpp                1614
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ya.make                           18
25 files changed, 5785 insertions, 5785 deletions
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7f2add81e8..505253e02f 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -24,14 +24,14 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
@@ -412,11 +412,11 @@ namespace {
SDValue visitSUBO(SDNode *N);
SDValue visitADDE(SDNode *N);
SDValue visitADDCARRY(SDNode *N);
- SDValue visitSADDO_CARRY(SDNode *N);
+ SDValue visitSADDO_CARRY(SDNode *N);
SDValue visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, SDNode *N);
SDValue visitSUBE(SDNode *N);
SDValue visitSUBCARRY(SDNode *N);
- SDValue visitSSUBO_CARRY(SDNode *N);
+ SDValue visitSSUBO_CARRY(SDNode *N);
SDValue visitMUL(SDNode *N);
SDValue visitMULFIX(SDNode *N);
SDValue useDivRem(SDNode *N);
@@ -468,7 +468,7 @@ namespace {
SDValue visitFREEZE(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N);
SDValue visitFADD(SDNode *N);
- SDValue visitSTRICT_FADD(SDNode *N);
+ SDValue visitSTRICT_FADD(SDNode *N);
SDValue visitFSUB(SDNode *N);
SDValue visitFMUL(SDNode *N);
SDValue visitFMA(SDNode *N);
@@ -544,7 +544,7 @@ namespace {
SDValue convertSelectOfFPConstantsToLoadOffset(
const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
ISD::CondCode CC);
- SDValue foldSignChangeInBitcast(SDNode *N);
+ SDValue foldSignChangeInBitcast(SDNode *N);
SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
SDValue N2, SDValue N3, ISD::CondCode CC);
SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
@@ -592,7 +592,7 @@ namespace {
const SDLoc &DL);
SDValue MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
SDValue MatchLoadCombine(SDNode *N);
- SDValue mergeTruncStores(StoreSDNode *N);
+ SDValue mergeTruncStores(StoreSDNode *N);
SDValue ReduceLoadWidth(SDNode *N);
SDValue ReduceLoadOpStoreWidth(SDNode *N);
SDValue splitMergedValStore(StoreSDNode *ST);
@@ -647,18 +647,18 @@ namespace {
// Classify the origin of a stored value.
enum class StoreSource { Unknown, Constant, Extract, Load };
StoreSource getStoreSource(SDValue StoreVal) {
- switch (StoreVal.getOpcode()) {
- case ISD::Constant:
- case ISD::ConstantFP:
+ switch (StoreVal.getOpcode()) {
+ case ISD::Constant:
+ case ISD::ConstantFP:
return StoreSource::Constant;
- case ISD::EXTRACT_VECTOR_ELT:
- case ISD::EXTRACT_SUBVECTOR:
+ case ISD::EXTRACT_VECTOR_ELT:
+ case ISD::EXTRACT_SUBVECTOR:
return StoreSource::Extract;
- case ISD::LOAD:
+ case ISD::LOAD:
return StoreSource::Load;
- default:
- return StoreSource::Unknown;
- }
+ default:
+ return StoreSource::Unknown;
+ }
}
/// This is a helper function for visitMUL to check the profitability
@@ -762,7 +762,7 @@ namespace {
/// is legal or custom before legalizing operations, and whether it is
/// legal (but not custom) after legalization.
bool hasOperation(unsigned Opcode, EVT VT) {
- return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations);
+ return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations);
}
public:
@@ -932,40 +932,40 @@ bool DAGCombiner::isOneUseSetCC(SDValue N) const {
return false;
}
-static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy) {
- if (!ScalarTy.isSimple())
- return false;
-
- uint64_t MaskForTy = 0ULL;
- switch (ScalarTy.getSimpleVT().SimpleTy) {
- case MVT::i8:
- MaskForTy = 0xFFULL;
- break;
- case MVT::i16:
- MaskForTy = 0xFFFFULL;
- break;
- case MVT::i32:
- MaskForTy = 0xFFFFFFFFULL;
- break;
- default:
- return false;
- break;
- }
-
- APInt Val;
- if (ISD::isConstantSplatVector(N, Val))
- return Val.getLimitedValue() == MaskForTy;
-
- return false;
-}
-
-// Determines if it is a constant integer or a splat/build vector of constant
+static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy) {
+ if (!ScalarTy.isSimple())
+ return false;
+
+ uint64_t MaskForTy = 0ULL;
+ switch (ScalarTy.getSimpleVT().SimpleTy) {
+ case MVT::i8:
+ MaskForTy = 0xFFULL;
+ break;
+ case MVT::i16:
+ MaskForTy = 0xFFFFULL;
+ break;
+ case MVT::i32:
+ MaskForTy = 0xFFFFFFFFULL;
+ break;
+ default:
+ return false;
+ break;
+ }
+
+ APInt Val;
+ if (ISD::isConstantSplatVector(N, Val))
+ return Val.getLimitedValue() == MaskForTy;
+
+ return false;
+}
+
+// Determines if it is a constant integer or a splat/build vector of constant
// integers (and undefs).
// Do not permit build vector implicit truncation.
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
return !(Const->isOpaque() && NoOpaques);
- if (N.getOpcode() != ISD::BUILD_VECTOR && N.getOpcode() != ISD::SPLAT_VECTOR)
+ if (N.getOpcode() != ISD::BUILD_VECTOR && N.getOpcode() != ISD::SPLAT_VECTOR)
return false;
unsigned BitWidth = N.getScalarValueSizeInBits();
for (const SDValue &Op : N->op_values()) {
@@ -1579,15 +1579,15 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
DAG.ReplaceAllUsesWith(N, &RV);
}
- // Push the new node and any users onto the worklist. Omit this if the
- // new node is the EntryToken (e.g. if a store managed to get optimized
- // out), because re-visiting the EntryToken and its users will not uncover
- // any additional opportunities, but there may be a large number of such
- // users, potentially causing compile time explosion.
- if (RV.getOpcode() != ISD::EntryToken) {
- AddToWorklist(RV.getNode());
- AddUsersToWorklist(RV.getNode());
- }
+ // Push the new node and any users onto the worklist. Omit this if the
+ // new node is the EntryToken (e.g. if a store managed to get optimized
+ // out), because re-visiting the EntryToken and its users will not uncover
+ // any additional opportunities, but there may be a large number of such
+ // users, potentially causing compile time explosion.
+ if (RV.getOpcode() != ISD::EntryToken) {
+ AddToWorklist(RV.getNode());
+ AddUsersToWorklist(RV.getNode());
+ }
// Finally, if the node is now dead, remove it from the graph. The node
// may not be dead if the replacement process recursively simplified to
@@ -1620,10 +1620,10 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::USUBO: return visitSUBO(N);
case ISD::ADDE: return visitADDE(N);
case ISD::ADDCARRY: return visitADDCARRY(N);
- case ISD::SADDO_CARRY: return visitSADDO_CARRY(N);
+ case ISD::SADDO_CARRY: return visitSADDO_CARRY(N);
case ISD::SUBE: return visitSUBE(N);
case ISD::SUBCARRY: return visitSUBCARRY(N);
- case ISD::SSUBO_CARRY: return visitSSUBO_CARRY(N);
+ case ISD::SSUBO_CARRY: return visitSSUBO_CARRY(N);
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
case ISD::UMULFIX:
@@ -1679,7 +1679,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
case ISD::FADD: return visitFADD(N);
- case ISD::STRICT_FADD: return visitSTRICT_FADD(N);
+ case ISD::STRICT_FADD: return visitSTRICT_FADD(N);
case ISD::FSUB: return visitFSUB(N);
case ISD::FMUL: return visitFMUL(N);
case ISD::FMA: return visitFMA(N);
@@ -1839,10 +1839,10 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
if (OptLevel == CodeGenOpt::None)
return SDValue();
- // Don't simplify the token factor if the node itself has too many operands.
- if (N->getNumOperands() > TokenFactorInlineLimit)
- return SDValue();
-
+ // Don't simplify the token factor if the node itself has too many operands.
+ if (N->getNumOperands() > TokenFactorInlineLimit)
+ return SDValue();
+
// If the sole user is a token factor, we should make sure we have a
// chance to merge them together. This prevents TF chains from inhibiting
// optimizations.
@@ -1928,7 +1928,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
// If this is an Op, we can remove the op from the list. Re-mark any
// search associated with it as from the current OpNumber.
- if (SeenOps.contains(Op)) {
+ if (SeenOps.contains(Op)) {
Changed = true;
DidPruneOps = true;
unsigned OrigOpNumber = 0;
@@ -2040,62 +2040,62 @@ static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
}
-/// Return true if 'Use' is a load or a store that uses N as its base pointer
-/// and that N may be folded in the load / store addressing mode.
-static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG,
- const TargetLowering &TLI) {
- EVT VT;
- unsigned AS;
-
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
- if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
- return false;
- VT = LD->getMemoryVT();
- AS = LD->getAddressSpace();
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
- if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
- return false;
- VT = ST->getMemoryVT();
- AS = ST->getAddressSpace();
- } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Use)) {
- if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
- return false;
- VT = LD->getMemoryVT();
- AS = LD->getAddressSpace();
- } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Use)) {
- if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
- return false;
- VT = ST->getMemoryVT();
- AS = ST->getAddressSpace();
- } else
- return false;
-
- TargetLowering::AddrMode AM;
- if (N->getOpcode() == ISD::ADD) {
- AM.HasBaseReg = true;
- ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (Offset)
- // [reg +/- imm]
- AM.BaseOffs = Offset->getSExtValue();
- else
- // [reg +/- reg]
- AM.Scale = 1;
- } else if (N->getOpcode() == ISD::SUB) {
- AM.HasBaseReg = true;
- ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (Offset)
- // [reg +/- imm]
- AM.BaseOffs = -Offset->getSExtValue();
- else
- // [reg +/- reg]
- AM.Scale = 1;
- } else
- return false;
-
- return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
- VT.getTypeForEVT(*DAG.getContext()), AS);
-}
-
+/// Return true if 'Use' is a load or a store that uses N as its base pointer
+/// and that N may be folded in the load / store addressing mode.
+static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ EVT VT;
+ unsigned AS;
+
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
+ if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
+ return false;
+ VT = LD->getMemoryVT();
+ AS = LD->getAddressSpace();
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
+ if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
+ return false;
+ VT = ST->getMemoryVT();
+ AS = ST->getAddressSpace();
+ } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Use)) {
+ if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
+ return false;
+ VT = LD->getMemoryVT();
+ AS = LD->getAddressSpace();
+ } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Use)) {
+ if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
+ return false;
+ VT = ST->getMemoryVT();
+ AS = ST->getAddressSpace();
+ } else
+ return false;
+
+ TargetLowering::AddrMode AM;
+ if (N->getOpcode() == ISD::ADD) {
+ AM.HasBaseReg = true;
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (Offset)
+ // [reg +/- imm]
+ AM.BaseOffs = Offset->getSExtValue();
+ else
+ // [reg +/- reg]
+ AM.Scale = 1;
+ } else if (N->getOpcode() == ISD::SUB) {
+ AM.HasBaseReg = true;
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (Offset)
+ // [reg +/- imm]
+ AM.BaseOffs = -Offset->getSExtValue();
+ else
+ // [reg +/- reg]
+ AM.Scale = 1;
+ } else
+ return false;
+
+ return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
+ VT.getTypeForEVT(*DAG.getContext()), AS);
+}
+
SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
assert(TLI.isBinOp(BO->getOpcode()) && BO->getNumValues() == 1 &&
"Unexpected binary operator");
@@ -2115,12 +2115,12 @@ SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
SDValue CT = Sel.getOperand(1);
if (!isConstantOrConstantVector(CT, true) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(CT))
+ !DAG.isConstantFPBuildVectorOrConstantFP(CT))
return SDValue();
SDValue CF = Sel.getOperand(2);
if (!isConstantOrConstantVector(CF, true) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(CF))
+ !DAG.isConstantFPBuildVectorOrConstantFP(CF))
return SDValue();
// Bail out if any constants are opaque because we can't constant fold those.
@@ -2137,10 +2137,10 @@ SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
SDValue CBO = BO->getOperand(SelOpNo ^ 1);
if (!CanFoldNonConst &&
!isConstantOrConstantVector(CBO, true) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(CBO))
+ !DAG.isConstantFPBuildVectorOrConstantFP(CBO))
return SDValue();
- EVT VT = BO->getValueType(0);
+ EVT VT = BO->getValueType(0);
// We have a select-of-constants followed by a binary operator with a
// constant. Eliminate the binop by pulling the constant math into the select.
@@ -2150,14 +2150,14 @@ SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
: DAG.getNode(BinOpcode, DL, VT, CT, CBO);
if (!CanFoldNonConst && !NewCT.isUndef() &&
!isConstantOrConstantVector(NewCT, true) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(NewCT))
+ !DAG.isConstantFPBuildVectorOrConstantFP(NewCT))
return SDValue();
SDValue NewCF = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CF)
: DAG.getNode(BinOpcode, DL, VT, CF, CBO);
if (!CanFoldNonConst && !NewCF.isUndef() &&
!isConstantOrConstantVector(NewCF, true) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(NewCF))
+ !DAG.isConstantFPBuildVectorOrConstantFP(NewCF))
return SDValue();
SDValue SelectOp = DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF);
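[Illustrative aside, not part of this patch: a plain-integer sketch of what foldBinOpIntoSelect does, with a hypothetical function name. Pulling the constant math through a select of constants eliminates the binary operator.]
// add (select Cond, 2, 4), 1 is rewritten to select Cond, 3, 5.
int foldedSelectAdd(bool Cond) {
  int Before = (Cond ? 2 : 4) + 1; // one select feeding an add
  int After = Cond ? 3 : 5;        // constant math folded into the select arms
  return Before == After ? After : -1; // equal for both values of Cond
}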
@@ -2487,8 +2487,8 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
// Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) {
- const APInt &C0 = N0->getConstantOperandAPInt(0);
- const APInt &C1 = N1->getConstantOperandAPInt(0);
+ const APInt &C0 = N0->getConstantOperandAPInt(0);
+ const APInt &C1 = N1->getConstantOperandAPInt(0);
return DAG.getVScale(DL, VT, C0 + C1);
}
@@ -2496,9 +2496,9 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if ((N0.getOpcode() == ISD::ADD) &&
(N0.getOperand(1).getOpcode() == ISD::VSCALE) &&
(N1.getOpcode() == ISD::VSCALE)) {
- const APInt &VS0 = N0.getOperand(1)->getConstantOperandAPInt(0);
- const APInt &VS1 = N1->getConstantOperandAPInt(0);
- SDValue VS = DAG.getVScale(DL, VT, VS0 + VS1);
+ const APInt &VS0 = N0.getOperand(1)->getConstantOperandAPInt(0);
+ const APInt &VS1 = N1->getConstantOperandAPInt(0);
+ SDValue VS = DAG.getVScale(DL, VT, VS0 + VS1);
return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), VS);
}
@@ -2721,13 +2721,13 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
* then the flip also occurs if computing the inverse is the same cost.
* This function returns an empty SDValue in case it cannot flip the boolean
* without increasing the cost of the computation. If you want to flip a boolean
- * no matter what, use DAG.getLogicalNOT.
+ * no matter what, use DAG.getLogicalNOT.
*/
static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
const TargetLowering &TLI,
bool Force) {
if (Force && isa<ConstantSDNode>(V))
- return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
+ return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
if (V.getOpcode() != ISD::XOR)
return SDValue();
@@ -2754,7 +2754,7 @@ static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
if (IsFlip)
return V.getOperand(0);
if (Force)
- return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
+ return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
return SDValue();
}
@@ -2791,8 +2791,8 @@ SDValue DAGCombiner::visitADDO(SDNode *N) {
if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) {
SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(),
DAG.getConstant(0, DL, VT), N0.getOperand(0));
- return CombineTo(
- N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
+ return CombineTo(
+ N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
}
if (SDValue Combined = visitUADDOLike(N0, N1, N))
@@ -2887,28 +2887,28 @@ SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
return SDValue();
}
-SDValue DAGCombiner::visitSADDO_CARRY(SDNode *N) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue CarryIn = N->getOperand(2);
- SDLoc DL(N);
-
- // canonicalize constant to RHS
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (N0C && !N1C)
- return DAG.getNode(ISD::SADDO_CARRY, DL, N->getVTList(), N1, N0, CarryIn);
-
- // fold (saddo_carry x, y, false) -> (saddo x, y)
- if (isNullConstant(CarryIn)) {
- if (!LegalOperations ||
- TLI.isOperationLegalOrCustom(ISD::SADDO, N->getValueType(0)))
- return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0, N1);
- }
-
- return SDValue();
-}
-
+SDValue DAGCombiner::visitSADDO_CARRY(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue CarryIn = N->getOperand(2);
+ SDLoc DL(N);
+
+ // canonicalize constant to RHS
+ ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ if (N0C && !N1C)
+ return DAG.getNode(ISD::SADDO_CARRY, DL, N->getVTList(), N1, N0, CarryIn);
+
+ // fold (saddo_carry x, y, false) -> (saddo x, y)
+ if (isNullConstant(CarryIn)) {
+ if (!LegalOperations ||
+ TLI.isOperationLegalOrCustom(ISD::SADDO, N->getValueType(0)))
+ return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0, N1);
+ }
+
+ return SDValue();
+}
+
/**
* If we are facing some sort of diamond carry propagation pattern, try to
* break it up to generate something like:
@@ -3094,8 +3094,8 @@ SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
SDLoc DL(N);
SDValue Sub = DAG.getNode(ISD::SUBCARRY, DL, N->getVTList(), N1,
N0.getOperand(0), NotC);
- return CombineTo(
- N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
+ return CombineTo(
+ N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
}
// Iff the flag result is dead:
@@ -3200,13 +3200,13 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// 0 - X --> X if X is 0 or the minimum signed value.
return N1;
}
-
- // Convert 0 - abs(x).
- SDValue Result;
- if (N1->getOpcode() == ISD::ABS &&
- !TLI.isOperationLegalOrCustom(ISD::ABS, VT) &&
- TLI.expandABS(N1.getNode(), Result, DAG, true))
- return Result;
+
+ // Convert 0 - abs(x).
+ SDValue Result;
+ if (N1->getOpcode() == ISD::ABS &&
+ !TLI.isOperationLegalOrCustom(ISD::ABS, VT) &&
+ TLI.expandABS(N1.getNode(), Result, DAG, true))
+ return Result;
}
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
@@ -3402,9 +3402,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
SDValue S0 = N1.getOperand(0);
- if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0))
+ if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0))
if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
- if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
+ if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
}
}
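[Illustrative aside, not part of this patch: the sub/xor/sra triple matched above is the classic branchless absolute value. A standalone i32 sketch, hypothetical function name; like abs itself, it overflows for INT32_MIN.]
#include <cstdint>
// (X ^ (X >> 31)) - (X >> 31) == abs(X); the combiner folds this shape
// into a single ISD::ABS node.
int32_t absPattern(int32_t X) {
  int32_t S = X >> 31; // arithmetic shift: 0 for non-negative X, -1 otherwise
  return (X ^ S) - S;
}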
@@ -3436,7 +3436,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
if (N1.getOpcode() == ISD::VSCALE) {
- const APInt &IntVal = N1.getConstantOperandAPInt(0);
+ const APInt &IntVal = N1.getConstantOperandAPInt(0);
return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
}
@@ -3595,21 +3595,21 @@ SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
return SDValue();
}
-SDValue DAGCombiner::visitSSUBO_CARRY(SDNode *N) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue CarryIn = N->getOperand(2);
-
- // fold (ssubo_carry x, y, false) -> (ssubo x, y)
- if (isNullConstant(CarryIn)) {
- if (!LegalOperations ||
- TLI.isOperationLegalOrCustom(ISD::SSUBO, N->getValueType(0)))
- return DAG.getNode(ISD::SSUBO, SDLoc(N), N->getVTList(), N0, N1);
- }
-
- return SDValue();
-}
-
+SDValue DAGCombiner::visitSSUBO_CARRY(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue CarryIn = N->getOperand(2);
+
+ // fold (ssubo_carry x, y, false) -> (ssubo x, y)
+ if (isNullConstant(CarryIn)) {
+ if (!LegalOperations ||
+ TLI.isOperationLegalOrCustom(ISD::SSUBO, N->getValueType(0)))
+ return DAG.getNode(ISD::SSUBO, SDLoc(N), N->getVTList(), N0, N1);
+ }
+
+ return SDValue();
+}
+
// Notice that "mulfix" can be any of SMULFIX, SMULFIXSAT, UMULFIX and
// UMULFIXSAT here.
SDValue DAGCombiner::visitMULFIX(SDNode *N) {
@@ -3715,30 +3715,30 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
getShiftAmountTy(N0.getValueType()))));
}
- // Try to transform:
- // (1) multiply-by-(power-of-2 +/- 1) into shift and add/sub.
+ // Try to transform:
+ // (1) multiply-by-(power-of-2 +/- 1) into shift and add/sub.
// mul x, (2^N + 1) --> add (shl x, N), x
// mul x, (2^N - 1) --> sub (shl x, N), x
// Examples: x * 33 --> (x << 5) + x
// x * 15 --> (x << 4) - x
// x * -33 --> -((x << 5) + x)
// x * -15 --> -((x << 4) - x) ; this reduces --> x - (x << 4)
- // (2) multiply-by-(power-of-2 +/- power-of-2) into shifts and add/sub.
- // mul x, (2^N + 2^M) --> (add (shl x, N), (shl x, M))
- // mul x, (2^N - 2^M) --> (sub (shl x, N), (shl x, M))
- // Examples: x * 0x8800 --> (x << 15) + (x << 11)
- // x * 0xf800 --> (x << 16) - (x << 11)
- // x * -0x8800 --> -((x << 15) + (x << 11))
- // x * -0xf800 --> -((x << 16) - (x << 11)) ; (x << 11) - (x << 16)
+ // (2) multiply-by-(power-of-2 +/- power-of-2) into shifts and add/sub.
+ // mul x, (2^N + 2^M) --> (add (shl x, N), (shl x, M))
+ // mul x, (2^N - 2^M) --> (sub (shl x, N), (shl x, M))
+ // Examples: x * 0x8800 --> (x << 15) + (x << 11)
+ // x * 0xf800 --> (x << 16) - (x << 11)
+ // x * -0x8800 --> -((x << 15) + (x << 11))
+ // x * -0xf800 --> -((x << 16) - (x << 11)) ; (x << 11) - (x << 16)
if (N1IsConst && TLI.decomposeMulByConstant(*DAG.getContext(), VT, N1)) {
// TODO: We could handle more general decomposition of any constant by
// having the target set a limit on number of ops and making a
// callback to determine that sequence (similar to sqrt expansion).
unsigned MathOp = ISD::DELETED_NODE;
APInt MulC = ConstValue1.abs();
- // The constant `2` should be treated as (2^0 + 1).
- unsigned TZeros = MulC == 2 ? 0 : MulC.countTrailingZeros();
- MulC.lshrInPlace(TZeros);
+ // The constant `2` should be treated as (2^0 + 1).
+ unsigned TZeros = MulC == 2 ? 0 : MulC.countTrailingZeros();
+ MulC.lshrInPlace(TZeros);
if ((MulC - 1).isPowerOf2())
MathOp = ISD::ADD;
else if ((MulC + 1).isPowerOf2())
@@ -3747,17 +3747,17 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
if (MathOp != ISD::DELETED_NODE) {
unsigned ShAmt =
MathOp == ISD::ADD ? (MulC - 1).logBase2() : (MulC + 1).logBase2();
- ShAmt += TZeros;
+ ShAmt += TZeros;
assert(ShAmt < VT.getScalarSizeInBits() &&
"multiply-by-constant generated out of bounds shift");
SDLoc DL(N);
SDValue Shl =
DAG.getNode(ISD::SHL, DL, VT, N0, DAG.getConstant(ShAmt, DL, VT));
- SDValue R =
- TZeros ? DAG.getNode(MathOp, DL, VT, Shl,
- DAG.getNode(ISD::SHL, DL, VT, N0,
- DAG.getConstant(TZeros, DL, VT)))
- : DAG.getNode(MathOp, DL, VT, Shl, N0);
+ SDValue R =
+ TZeros ? DAG.getNode(MathOp, DL, VT, Shl,
+ DAG.getNode(ISD::SHL, DL, VT, N0,
+ DAG.getConstant(TZeros, DL, VT)))
+ : DAG.getNode(MathOp, DL, VT, Shl, N0);
if (ConstValue1.isNegative())
R = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), R);
return R;
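[Illustrative aside, not part of this patch: a worked instance of case (2) above, using the comment's own constant; the function name is hypothetical.]
#include <cstdint>
// x * 0x8800: MulC = 0x8800 has TZeros = 11 trailing zeros; MulC >> 11 = 0x11
// = 2^4 + 1, so MathOp = ADD and ShAmt = 4 + 11 = 15. The multiply becomes
// (x << 15) + (x << 11), matching "x * 0x8800 --> (x << 15) + (x << 11)".
uint32_t mulBy0x8800(uint32_t X) {
  return (X << 15) + (X << 11);
}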
@@ -3809,42 +3809,42 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
// Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
if (N0.getOpcode() == ISD::VSCALE)
if (ConstantSDNode *NC1 = isConstOrConstSplat(N1)) {
- const APInt &C0 = N0.getConstantOperandAPInt(0);
- const APInt &C1 = NC1->getAPIntValue();
+ const APInt &C0 = N0.getConstantOperandAPInt(0);
+ const APInt &C1 = NC1->getAPIntValue();
return DAG.getVScale(SDLoc(N), VT, C0 * C1);
}
- // Fold ((mul x, 0/undef) -> 0,
- // (mul x, 1) -> x)
- // -> and(x, mask)
- // We can replace vectors with '0' and '1' factors with a clearing mask.
- if (VT.isFixedLengthVector()) {
- unsigned NumElts = VT.getVectorNumElements();
- SmallBitVector ClearMask;
- ClearMask.reserve(NumElts);
- auto IsClearMask = [&ClearMask](ConstantSDNode *V) {
- if (!V || V->isNullValue()) {
- ClearMask.push_back(true);
- return true;
- }
- ClearMask.push_back(false);
- return V->isOne();
- };
- if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::AND, VT)) &&
- ISD::matchUnaryPredicate(N1, IsClearMask, /*AllowUndefs*/ true)) {
- assert(N1.getOpcode() == ISD::BUILD_VECTOR && "Unknown constant vector");
- SDLoc DL(N);
- EVT LegalSVT = N1.getOperand(0).getValueType();
- SDValue Zero = DAG.getConstant(0, DL, LegalSVT);
- SDValue AllOnes = DAG.getAllOnesConstant(DL, LegalSVT);
- SmallVector<SDValue, 16> Mask(NumElts, AllOnes);
- for (unsigned I = 0; I != NumElts; ++I)
- if (ClearMask[I])
- Mask[I] = Zero;
- return DAG.getNode(ISD::AND, DL, VT, N0, DAG.getBuildVector(VT, DL, Mask));
- }
- }
-
+ // Fold ((mul x, 0/undef) -> 0,
+ // (mul x, 1) -> x)
+ // -> and(x, mask)
+ // We can replace vectors with '0' and '1' factors with a clearing mask.
+ if (VT.isFixedLengthVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ SmallBitVector ClearMask;
+ ClearMask.reserve(NumElts);
+ auto IsClearMask = [&ClearMask](ConstantSDNode *V) {
+ if (!V || V->isNullValue()) {
+ ClearMask.push_back(true);
+ return true;
+ }
+ ClearMask.push_back(false);
+ return V->isOne();
+ };
+ if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::AND, VT)) &&
+ ISD::matchUnaryPredicate(N1, IsClearMask, /*AllowUndefs*/ true)) {
+ assert(N1.getOpcode() == ISD::BUILD_VECTOR && "Unknown constant vector");
+ SDLoc DL(N);
+ EVT LegalSVT = N1.getOperand(0).getValueType();
+ SDValue Zero = DAG.getConstant(0, DL, LegalSVT);
+ SDValue AllOnes = DAG.getAllOnesConstant(DL, LegalSVT);
+ SmallVector<SDValue, 16> Mask(NumElts, AllOnes);
+ for (unsigned I = 0; I != NumElts; ++I)
+ if (ClearMask[I])
+ Mask[I] = Zero;
+ return DAG.getNode(ISD::AND, DL, VT, N0, DAG.getBuildVector(VT, DL, Mask));
+ }
+ }
+
// reassociate mul
if (SDValue RMUL = reassociateOps(ISD::MUL, SDLoc(N), N0, N1, N->getFlags()))
return RMUL;
@@ -4266,7 +4266,7 @@ SDValue DAGCombiner::visitREM(SDNode *N) {
} else {
if (DAG.isKnownToBeAPowerOfTwo(N1)) {
// fold (urem x, pow2) -> (and x, pow2-1)
- SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
+ SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
@@ -4274,7 +4274,7 @@ SDValue DAGCombiner::visitREM(SDNode *N) {
if (N1.getOpcode() == ISD::SHL &&
DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
- SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
+ SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
AddToWorklist(Add.getNode());
return DAG.getNode(ISD::AND, DL, VT, N0, Add);
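[Illustrative aside, not part of this patch: the arithmetic identity behind the two urem folds above, as a standalone sketch with a hypothetical function name.]
#include <cstdint>
// For any power of two M, x % M == x & (M - 1). The DAG form materializes
// the mask by adding the all-ones constant (-1) to M.
uint32_t uremPow2(uint32_t X, uint32_t M /* must be a power of two */) {
  return X & (M - 1u);
}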
@@ -4343,8 +4343,8 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
// If the type twice as wide is legal, transform the mulhs to a wider multiply
// plus a shift.
- if (!TLI.isOperationLegalOrCustom(ISD::MULHS, VT) && VT.isSimple() &&
- !VT.isVector()) {
+ if (!TLI.isOperationLegalOrCustom(ISD::MULHS, VT) && VT.isSimple() &&
+ !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
@@ -4400,8 +4400,8 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
// If the type twice as wide is legal, transform the mulhu to a wider multiply
// plus a shift.
- if (!TLI.isOperationLegalOrCustom(ISD::MULHU, VT) && VT.isSimple() &&
- !VT.isVector()) {
+ if (!TLI.isOperationLegalOrCustom(ISD::MULHU, VT) && VT.isSimple() &&
+ !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
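[Illustrative aside, not part of this patch: the widening described in the two hunks above, sketched for i32 where the double-width i64 type is legal; helper names are hypothetical.]
#include <cstdint>
// mulhs/mulhu return the high half of the full product; with a legal wide
// type that is one wide multiply followed by a shift.
int32_t mulhs32(int32_t A, int32_t B) {
  return int32_t((int64_t(A) * int64_t(B)) >> 32); // signed high half
}
uint32_t mulhu32(uint32_t A, uint32_t B) {
  return uint32_t((uint64_t(A) * uint64_t(B)) >> 32); // unsigned high half
}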
@@ -4607,10 +4607,10 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
return DAG.getNode(AltOpcode, SDLoc(N), VT, N0, N1);
}
- // Simplify the operands using demanded-bits information.
- if (SimplifyDemandedBits(SDValue(N, 0)))
- return SDValue(N, 0);
-
+ // Simplify the operands using demanded-bits information.
+ if (SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
@@ -5079,15 +5079,15 @@ bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
if (!LDST->isSimple())
return false;
- EVT LdStMemVT = LDST->getMemoryVT();
-
- // Bail out when changing the scalable property, since we can't be sure that
- // we're actually narrowing here.
- if (LdStMemVT.isScalableVector() != MemVT.isScalableVector())
- return false;
-
+ EVT LdStMemVT = LDST->getMemoryVT();
+
+ // Bail out when changing the scalable property, since we can't be sure that
+ // we're actually narrowing here.
+ if (LdStMemVT.isScalableVector() != MemVT.isScalableVector())
+ return false;
+
// Verify that we are actually reducing a load width here.
- if (LdStMemVT.bitsLT(MemVT))
+ if (LdStMemVT.bitsLT(MemVT))
return false;
// Ensure that this isn't going to produce an unsupported memory access.
@@ -5442,31 +5442,31 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
return N1;
if (ISD::isBuildVectorAllOnes(N1.getNode()))
return N0;
-
- // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
- auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0);
- auto *BVec = dyn_cast<BuildVectorSDNode>(N1);
- if (MLoad && BVec && MLoad->getExtensionType() == ISD::EXTLOAD &&
- N0.hasOneUse() && N1.hasOneUse()) {
- EVT LoadVT = MLoad->getMemoryVT();
- EVT ExtVT = VT;
- if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) {
- // For this AND to be a zero extension of the masked load the elements
- // of the BuildVec must mask the bottom bits of the extended element
- // type
- if (ConstantSDNode *Splat = BVec->getConstantSplatNode()) {
- uint64_t ElementSize =
- LoadVT.getVectorElementType().getScalarSizeInBits();
- if (Splat->getAPIntValue().isMask(ElementSize)) {
- return DAG.getMaskedLoad(
- ExtVT, SDLoc(N), MLoad->getChain(), MLoad->getBasePtr(),
- MLoad->getOffset(), MLoad->getMask(), MLoad->getPassThru(),
- LoadVT, MLoad->getMemOperand(), MLoad->getAddressingMode(),
- ISD::ZEXTLOAD, MLoad->isExpandingLoad());
- }
- }
- }
- }
+
+ // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
+ auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0);
+ auto *BVec = dyn_cast<BuildVectorSDNode>(N1);
+ if (MLoad && BVec && MLoad->getExtensionType() == ISD::EXTLOAD &&
+ N0.hasOneUse() && N1.hasOneUse()) {
+ EVT LoadVT = MLoad->getMemoryVT();
+ EVT ExtVT = VT;
+ if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) {
+ // For this AND to be a zero extension of the masked load the elements
+ // of the BuildVec must mask the bottom bits of the extended element
+ // type
+ if (ConstantSDNode *Splat = BVec->getConstantSplatNode()) {
+ uint64_t ElementSize =
+ LoadVT.getVectorElementType().getScalarSizeInBits();
+ if (Splat->getAPIntValue().isMask(ElementSize)) {
+ return DAG.getMaskedLoad(
+ ExtVT, SDLoc(N), MLoad->getChain(), MLoad->getBasePtr(),
+ MLoad->getOffset(), MLoad->getMask(), MLoad->getPassThru(),
+ LoadVT, MLoad->getMemOperand(), MLoad->getAddressingMode(),
+ ISD::ZEXTLOAD, MLoad->isExpandingLoad());
+ }
+ }
+ }
+ }
}
// fold (and c1, c2) -> c1&c2
@@ -5635,28 +5635,28 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
}
}
- // fold (and (masked_gather x)) -> (zext_masked_gather x)
- if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
- EVT MemVT = GN0->getMemoryVT();
- EVT ScalarVT = MemVT.getScalarType();
-
- if (SDValue(GN0, 0).hasOneUse() &&
- isConstantSplatVectorMaskForType(N1.getNode(), ScalarVT) &&
- TLI.isVectorLoadExtDesirable(SDValue(SDValue(GN0, 0)))) {
- SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
- GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
-
- SDValue ZExtLoad = DAG.getMaskedGather(
- DAG.getVTList(VT, MVT::Other), MemVT, SDLoc(N), Ops,
- GN0->getMemOperand(), GN0->getIndexType(), ISD::ZEXTLOAD);
-
- CombineTo(N, ZExtLoad);
- AddToWorklist(ZExtLoad.getNode());
- // Avoid recheck of N.
- return SDValue(N, 0);
- }
- }
-
+ // fold (and (masked_gather x)) -> (zext_masked_gather x)
+ if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
+ EVT MemVT = GN0->getMemoryVT();
+ EVT ScalarVT = MemVT.getScalarType();
+
+ if (SDValue(GN0, 0).hasOneUse() &&
+ isConstantSplatVectorMaskForType(N1.getNode(), ScalarVT) &&
+ TLI.isVectorLoadExtDesirable(SDValue(SDValue(GN0, 0)))) {
+ SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
+ GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
+
+ SDValue ZExtLoad = DAG.getMaskedGather(
+ DAG.getVTList(VT, MVT::Other), MemVT, SDLoc(N), Ops,
+ GN0->getMemOperand(), GN0->getIndexType(), ISD::ZEXTLOAD);
+
+ CombineTo(N, ZExtLoad);
+ AddToWorklist(ZExtLoad.getNode());
+ // Avoid recheck of N.
+ return SDValue(N, 0);
+ }
+ }
+
// fold (and (load x), 255) -> (zextload x, i8)
// fold (and (extload x, i16), 255) -> (zextload x, i8)
// fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
@@ -5751,31 +5751,31 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (SDValue V = combineShiftAnd1ToBitTest(N, DAG))
return V;
- // Recognize the following pattern:
- //
- // AndVT = (and (sign_extend NarrowVT to AndVT) #bitmask)
- //
- // where bitmask is a mask that clears the upper bits of AndVT. The
- // number of bits in bitmask must be a power of two.
- auto IsAndZeroExtMask = [](SDValue LHS, SDValue RHS) {
- if (LHS->getOpcode() != ISD::SIGN_EXTEND)
- return false;
-
- auto *C = dyn_cast<ConstantSDNode>(RHS);
- if (!C)
- return false;
-
- if (!C->getAPIntValue().isMask(
- LHS.getOperand(0).getValueType().getFixedSizeInBits()))
- return false;
-
- return true;
- };
-
- // Replace (and (sign_extend ...) #bitmask) with (zero_extend ...).
- if (IsAndZeroExtMask(N0, N1))
- return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0.getOperand(0));
-
+ // Recognize the following pattern:
+ //
+ // AndVT = (and (sign_extend NarrowVT to AndVT) #bitmask)
+ //
+ // where bitmask is a mask that clears the upper bits of AndVT. The
+ // number of bits in bitmask must be a power of two.
+ auto IsAndZeroExtMask = [](SDValue LHS, SDValue RHS) {
+ if (LHS->getOpcode() != ISD::SIGN_EXTEND)
+ return false;
+
+ auto *C = dyn_cast<ConstantSDNode>(RHS);
+ if (!C)
+ return false;
+
+ if (!C->getAPIntValue().isMask(
+ LHS.getOperand(0).getValueType().getFixedSizeInBits()))
+ return false;
+
+ return true;
+ };
+
+ // Replace (and (sign_extend ...) #bitmask) with (zero_extend ...).
+ if (IsAndZeroExtMask(N0, N1))
+ return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0.getOperand(0));
+
return SDValue();
}
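[Illustrative aside, not part of this patch: a scalar sketch of the sign-extend-plus-mask rewrite above for i8 -> i32, where the 0xFF mask covers exactly the narrow width; function names are hypothetical.]
#include <cstdint>
// Masking a sign extension with a mask of exactly the narrow width yields
// the same bits as a zero extension, so the combiner emits ZERO_EXTEND.
uint32_t sextThenMask(int8_t X) { return uint32_t(int32_t(X)) & 0xFFu; }
uint32_t zextDirect(int8_t X) { return uint32_t(uint8_t(X)); } // same result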
@@ -6517,11 +6517,11 @@ static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg. The range [0, EltSize) means that we only need
// to consider shift amounts with defined behavior.
-//
-// The IsRotate flag should be set when the LHS of both shifts is the same.
-// Otherwise if matching a general funnel shift, it should be clear.
+//
+// The IsRotate flag should be set when the LHS of both shifts is the same.
+// Otherwise if matching a general funnel shift, it should be clear.
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
- SelectionDAG &DAG, bool IsRotate) {
+ SelectionDAG &DAG, bool IsRotate) {
// If EltSize is a power of 2 then:
//
// (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
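[Illustrative aside, not part of this patch: identity (a) above can be checked exhaustively for a concrete power-of-two element size; a minimal sketch for EltSize = 32 with a hypothetical function name.]
// (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
// for every Pos in [0, EltSize) when EltSize is a power of two.
bool checkRotateIdentity32() {
  for (unsigned Pos = 0; Pos < 32; ++Pos)
    if ((Pos == 0 ? 0u : 32u - Pos) != ((32u - Pos) & 31u))
      return false;
  return true;
}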
@@ -6553,11 +6553,11 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
// always invokes undefined behavior for 32-bit X.
//
// Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
- //
- // NOTE: We can only do this when matching an AND and not a general
- // funnel shift.
+ //
+ // NOTE: We can only do this when matching an AND and not a general
+ // funnel shift.
unsigned MaskLoBits = 0;
- if (IsRotate && Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
+ if (IsRotate && Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
unsigned Bits = Log2_64(EltSize);
@@ -6647,8 +6647,8 @@ SDValue DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
// (srl x, (*ext y))) ->
// (rotr x, y) or (rotl x, (sub 32, y))
EVT VT = Shifted.getValueType();
- if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG,
- /*IsRotate*/ true)) {
+ if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG,
+ /*IsRotate*/ true)) {
bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
HasPos ? Pos : Neg);
@@ -6677,7 +6677,7 @@ SDValue DAGCombiner::MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos,
// fold (or (shl x0, (*ext (sub 32, y))),
// (srl x1, (*ext y))) ->
// (fshr x0, x1, y) or (fshl x0, x1, (sub 32, y))
- if (matchRotateSub(InnerPos, InnerNeg, EltBits, DAG, /*IsRotate*/ N0 == N1)) {
+ if (matchRotateSub(InnerPos, InnerNeg, EltBits, DAG, /*IsRotate*/ N0 == N1)) {
bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, N0, N1,
HasPos ? Pos : Neg);
@@ -7031,11 +7031,11 @@ calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
return None;
}
-static unsigned littleEndianByteAt(unsigned BW, unsigned i) {
+static unsigned littleEndianByteAt(unsigned BW, unsigned i) {
return i;
}
-static unsigned bigEndianByteAt(unsigned BW, unsigned i) {
+static unsigned bigEndianByteAt(unsigned BW, unsigned i) {
return BW - i - 1;
}
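[Illustrative aside, not part of this patch: a worked example of the two helpers above for BW = 4; the byte extractor below is hypothetical.]
#include <cstdint>
// littleEndianByteAt(4, i) = i         -> offsets 0, 1, 2, 3
// bigEndianByteAt(4, i)    = 4 - i - 1 -> offsets 3, 2, 1, 0
// Byte i of a value lands at offset i in little-endian memory and at
// offset BW - i - 1 in big-endian memory.
uint8_t byteOfValue(uint32_t V, unsigned I) { return uint8_t(V >> (8 * I)); }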
@@ -7052,8 +7052,8 @@ static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
bool BigEndian = true, LittleEndian = true;
for (unsigned i = 0; i < Width; i++) {
int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset;
- LittleEndian &= CurrentByteOffset == littleEndianByteAt(Width, i);
- BigEndian &= CurrentByteOffset == bigEndianByteAt(Width, i);
+ LittleEndian &= CurrentByteOffset == littleEndianByteAt(Width, i);
+ BigEndian &= CurrentByteOffset == bigEndianByteAt(Width, i);
if (!BigEndian && !LittleEndian)
return None;
}
@@ -7096,98 +7096,98 @@ static SDValue stripTruncAndExt(SDValue Value) {
/// p[3] = (val >> 0) & 0xFF;
/// =>
/// *((i32)p) = BSWAP(val);
-SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {
- // The matching looks for "store (trunc x)" patterns that appear early but are
- // likely to be replaced by truncating store nodes during combining.
- // TODO: If there is evidence that running this later would help, this
- // limitation could be removed. Legality checks may need to be added
- // for the created store and optional bswap/rotate.
- if (LegalOperations)
- return SDValue();
-
- // We only handle merging simple stores of 1-4 bytes.
- // TODO: Allow unordered atomics when wider type is legal (see D66309)
- EVT MemVT = N->getMemoryVT();
- if (!(MemVT == MVT::i8 || MemVT == MVT::i16 || MemVT == MVT::i32) ||
- !N->isSimple() || N->isIndexed())
- return SDValue();
-
- // Collect all of the stores in the chain.
- SDValue Chain = N->getChain();
- SmallVector<StoreSDNode *, 8> Stores = {N};
- while (auto *Store = dyn_cast<StoreSDNode>(Chain)) {
- // All stores must be the same size to ensure that we are writing all of the
- // bytes in the wide value.
- // TODO: We could allow multiple sizes by tracking each stored byte.
- if (Store->getMemoryVT() != MemVT || !Store->isSimple() ||
- Store->isIndexed())
+SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {
+ // The matching looks for "store (trunc x)" patterns that appear early but are
+ // likely to be replaced by truncating store nodes during combining.
+ // TODO: If there is evidence that running this later would help, this
+ // limitation could be removed. Legality checks may need to be added
+ // for the created store and optional bswap/rotate.
+ if (LegalOperations)
+ return SDValue();
+
+ // We only handle merging simple stores of 1-4 bytes.
+ // TODO: Allow unordered atomics when wider type is legal (see D66309)
+ EVT MemVT = N->getMemoryVT();
+ if (!(MemVT == MVT::i8 || MemVT == MVT::i16 || MemVT == MVT::i32) ||
+ !N->isSimple() || N->isIndexed())
+ return SDValue();
+
+ // Collect all of the stores in the chain.
+ SDValue Chain = N->getChain();
+ SmallVector<StoreSDNode *, 8> Stores = {N};
+ while (auto *Store = dyn_cast<StoreSDNode>(Chain)) {
+ // All stores must be the same size to ensure that we are writing all of the
+ // bytes in the wide value.
+ // TODO: We could allow multiple sizes by tracking each stored byte.
+ if (Store->getMemoryVT() != MemVT || !Store->isSimple() ||
+ Store->isIndexed())
return SDValue();
Stores.push_back(Store);
Chain = Store->getChain();
}
- // There is no reason to continue if we do not have at least a pair of stores.
- if (Stores.size() < 2)
+ // There is no reason to continue if we do not have at least a pair of stores.
+ if (Stores.size() < 2)
return SDValue();
- // Handle simple types only.
- LLVMContext &Context = *DAG.getContext();
- unsigned NumStores = Stores.size();
- unsigned NarrowNumBits = N->getMemoryVT().getScalarSizeInBits();
- unsigned WideNumBits = NumStores * NarrowNumBits;
- EVT WideVT = EVT::getIntegerVT(Context, WideNumBits);
- if (WideVT != MVT::i16 && WideVT != MVT::i32 && WideVT != MVT::i64)
+ // Handle simple types only.
+ LLVMContext &Context = *DAG.getContext();
+ unsigned NumStores = Stores.size();
+ unsigned NarrowNumBits = N->getMemoryVT().getScalarSizeInBits();
+ unsigned WideNumBits = NumStores * NarrowNumBits;
+ EVT WideVT = EVT::getIntegerVT(Context, WideNumBits);
+ if (WideVT != MVT::i16 && WideVT != MVT::i32 && WideVT != MVT::i64)
return SDValue();
- // Check if all bytes of the source value that we are looking at are stored
- // to the same base address. Collect offsets from Base address into OffsetMap.
- SDValue SourceValue;
- SmallVector<int64_t, 8> OffsetMap(NumStores, INT64_MAX);
+ // Check if all bytes of the source value that we are looking at are stored
+ // to the same base address. Collect offsets from Base address into OffsetMap.
+ SDValue SourceValue;
+ SmallVector<int64_t, 8> OffsetMap(NumStores, INT64_MAX);
int64_t FirstOffset = INT64_MAX;
StoreSDNode *FirstStore = nullptr;
Optional<BaseIndexOffset> Base;
for (auto Store : Stores) {
- // All the stores store different parts of the CombinedValue. A truncate is
- // required to get the partial value.
+ // All the stores store different parts of the CombinedValue. A truncate is
+ // required to get the partial value.
SDValue Trunc = Store->getValue();
if (Trunc.getOpcode() != ISD::TRUNCATE)
return SDValue();
- // Other than the first/last part, a shift operation is required to get the
- // offset.
+ // Other than the first/last part, a shift operation is required to get the
+ // offset.
int64_t Offset = 0;
- SDValue WideVal = Trunc.getOperand(0);
- if ((WideVal.getOpcode() == ISD::SRL || WideVal.getOpcode() == ISD::SRA) &&
- isa<ConstantSDNode>(WideVal.getOperand(1))) {
- // The shift amount must be a constant multiple of the narrow type.
- // It is translated to the offset address in the wide source value "y".
+ SDValue WideVal = Trunc.getOperand(0);
+ if ((WideVal.getOpcode() == ISD::SRL || WideVal.getOpcode() == ISD::SRA) &&
+ isa<ConstantSDNode>(WideVal.getOperand(1))) {
+ // The shift amount must be a constant multiple of the narrow type.
+ // It is translated to the offset address in the wide source value "y".
//
- // x = srl y, ShiftAmtC
+ // x = srl y, ShiftAmtC
// i8 z = trunc x
// store z, ...
- uint64_t ShiftAmtC = WideVal.getConstantOperandVal(1);
- if (ShiftAmtC % NarrowNumBits != 0)
+ uint64_t ShiftAmtC = WideVal.getConstantOperandVal(1);
+ if (ShiftAmtC % NarrowNumBits != 0)
return SDValue();
- Offset = ShiftAmtC / NarrowNumBits;
- WideVal = WideVal.getOperand(0);
+ Offset = ShiftAmtC / NarrowNumBits;
+ WideVal = WideVal.getOperand(0);
}
- // Stores must share the same source value with different offsets.
- // Truncate and extends should be stripped to get the single source value.
- if (!SourceValue)
- SourceValue = WideVal;
- else if (stripTruncAndExt(SourceValue) != stripTruncAndExt(WideVal))
+ // Stores must share the same source value with different offsets.
+ // Truncate and extends should be stripped to get the single source value.
+ if (!SourceValue)
+ SourceValue = WideVal;
+ else if (stripTruncAndExt(SourceValue) != stripTruncAndExt(WideVal))
return SDValue();
- else if (SourceValue.getValueType() != WideVT) {
- if (WideVal.getValueType() == WideVT ||
- WideVal.getScalarValueSizeInBits() >
- SourceValue.getScalarValueSizeInBits())
- SourceValue = WideVal;
- // Give up if the source value type is smaller than the store size.
- if (SourceValue.getScalarValueSizeInBits() < WideVT.getScalarSizeInBits())
+ else if (SourceValue.getValueType() != WideVT) {
+ if (WideVal.getValueType() == WideVT ||
+ WideVal.getScalarValueSizeInBits() >
+ SourceValue.getScalarValueSizeInBits())
+ SourceValue = WideVal;
+ // Give up if the source value type is smaller than the store size.
+ if (SourceValue.getScalarValueSizeInBits() < WideVT.getScalarSizeInBits())
return SDValue();
}
- // Stores must share the same base address.
+ // Stores must share the same base address.
BaseIndexOffset Ptr = BaseIndexOffset::match(Store, DAG);
int64_t ByteOffsetFromBase = 0;
if (!Base)
@@ -7195,78 +7195,78 @@ SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {
else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
return SDValue();
- // Remember the first store.
+ // Remember the first store.
if (ByteOffsetFromBase < FirstOffset) {
FirstStore = Store;
FirstOffset = ByteOffsetFromBase;
}
// Map the offset in the store and the offset in the combined value, and
// early return if it has been set before.
- if (Offset < 0 || Offset >= NumStores || OffsetMap[Offset] != INT64_MAX)
+ if (Offset < 0 || Offset >= NumStores || OffsetMap[Offset] != INT64_MAX)
return SDValue();
- OffsetMap[Offset] = ByteOffsetFromBase;
+ OffsetMap[Offset] = ByteOffsetFromBase;
}
assert(FirstOffset != INT64_MAX && "First byte offset must be set");
assert(FirstStore && "First store must be set");
- // Check that a store of the wide type is both allowed and fast on the target
- const DataLayout &Layout = DAG.getDataLayout();
- bool Fast = false;
- bool Allowed = TLI.allowsMemoryAccess(Context, Layout, WideVT,
- *FirstStore->getMemOperand(), &Fast);
- if (!Allowed || !Fast)
- return SDValue();
-
- // Check if the pieces of the value are going to the expected places in memory
- // to merge the stores.
- auto checkOffsets = [&](bool MatchLittleEndian) {
- if (MatchLittleEndian) {
- for (unsigned i = 0; i != NumStores; ++i)
- if (OffsetMap[i] != i * (NarrowNumBits / 8) + FirstOffset)
- return false;
- } else { // MatchBigEndian by reversing loop counter.
- for (unsigned i = 0, j = NumStores - 1; i != NumStores; ++i, --j)
- if (OffsetMap[j] != i * (NarrowNumBits / 8) + FirstOffset)
- return false;
- }
- return true;
- };
-
- // Check if the offsets line up for the native data layout of this target.
- bool NeedBswap = false;
- bool NeedRotate = false;
- if (!checkOffsets(Layout.isLittleEndian())) {
- // Special-case: check if byte offsets line up for the opposite endian.
- if (NarrowNumBits == 8 && checkOffsets(Layout.isBigEndian()))
- NeedBswap = true;
- else if (NumStores == 2 && checkOffsets(Layout.isBigEndian()))
- NeedRotate = true;
- else
- return SDValue();
- }
-
- SDLoc DL(N);
- if (WideVT != SourceValue.getValueType()) {
- assert(SourceValue.getValueType().getScalarSizeInBits() > WideNumBits &&
- "Unexpected store value to merge");
- SourceValue = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SourceValue);
- }
-
- // Before legalize we can introduce illegal bswaps/rotates which will be later
+ // Check that a store of the wide type is both allowed and fast on the target
+ const DataLayout &Layout = DAG.getDataLayout();
+ bool Fast = false;
+ bool Allowed = TLI.allowsMemoryAccess(Context, Layout, WideVT,
+ *FirstStore->getMemOperand(), &Fast);
+ if (!Allowed || !Fast)
+ return SDValue();
+
+ // Check if the pieces of the value are going to the expected places in memory
+ // to merge the stores.
+ auto checkOffsets = [&](bool MatchLittleEndian) {
+ if (MatchLittleEndian) {
+ for (unsigned i = 0; i != NumStores; ++i)
+ if (OffsetMap[i] != i * (NarrowNumBits / 8) + FirstOffset)
+ return false;
+ } else { // MatchBigEndian by reversing loop counter.
+ for (unsigned i = 0, j = NumStores - 1; i != NumStores; ++i, --j)
+ if (OffsetMap[j] != i * (NarrowNumBits / 8) + FirstOffset)
+ return false;
+ }
+ return true;
+ };
+
+ // Check if the offsets line up for the native data layout of this target.
+ bool NeedBswap = false;
+ bool NeedRotate = false;
+ if (!checkOffsets(Layout.isLittleEndian())) {
+ // Special-case: check if byte offsets line up for the opposite endian.
+ if (NarrowNumBits == 8 && checkOffsets(Layout.isBigEndian()))
+ NeedBswap = true;
+ else if (NumStores == 2 && checkOffsets(Layout.isBigEndian()))
+ NeedRotate = true;
+ else
+ return SDValue();
+ }
+
+ SDLoc DL(N);
+ if (WideVT != SourceValue.getValueType()) {
+ assert(SourceValue.getValueType().getScalarSizeInBits() > WideNumBits &&
+ "Unexpected store value to merge");
+ SourceValue = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SourceValue);
+ }
+
+ // Before legalize we can introduce illegal bswaps/rotates which will be later
// converted to an explicit bswap sequence. This way we end up with a single
// store and byte shuffling instead of several stores and byte shuffling.
- if (NeedBswap) {
- SourceValue = DAG.getNode(ISD::BSWAP, DL, WideVT, SourceValue);
- } else if (NeedRotate) {
- assert(WideNumBits % 2 == 0 && "Unexpected type for rotate");
- SDValue RotAmt = DAG.getConstant(WideNumBits / 2, DL, WideVT);
- SourceValue = DAG.getNode(ISD::ROTR, DL, WideVT, SourceValue, RotAmt);
+ if (NeedBswap) {
+ SourceValue = DAG.getNode(ISD::BSWAP, DL, WideVT, SourceValue);
+ } else if (NeedRotate) {
+ assert(WideNumBits % 2 == 0 && "Unexpected type for rotate");
+ SDValue RotAmt = DAG.getConstant(WideNumBits / 2, DL, WideVT);
+ SourceValue = DAG.getNode(ISD::ROTR, DL, WideVT, SourceValue, RotAmt);
}
SDValue NewStore =
- DAG.getStore(Chain, DL, SourceValue, FirstStore->getBasePtr(),
- FirstStore->getPointerInfo(), FirstStore->getAlign());
+ DAG.getStore(Chain, DL, SourceValue, FirstStore->getBasePtr(),
+ FirstStore->getPointerInfo(), FirstStore->getAlign());
// Rely on other DAG combine rules to remove the other individual stores.
DAG.ReplaceAllUsesWith(N, NewStore.getNode());
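[Illustrative aside, not part of this patch: the source-level shape mergeTruncStores recognizes, following the function's own header example; the function name is hypothetical.]
#include <cstdint>
// Four narrow stores of shifted pieces of one 32-bit value. On a
// little-endian target this merges into a single i32 store of BSWAP(Val);
// on a big-endian target, into one plain i32 store.
void storeBigEndianBytes(uint8_t *P, uint32_t Val) {
  P[0] = uint8_t(Val >> 24);
  P[1] = uint8_t(Val >> 16);
  P[2] = uint8_t(Val >> 8);
  P[3] = uint8_t(Val >> 0);
}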
@@ -7321,8 +7321,8 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
"can only analyze providers for individual bytes not bit");
unsigned LoadByteWidth = LoadBitWidth / 8;
return IsBigEndianTarget
- ? bigEndianByteAt(LoadByteWidth, P.ByteOffset)
- : littleEndianByteAt(LoadByteWidth, P.ByteOffset);
+ ? bigEndianByteAt(LoadByteWidth, P.ByteOffset)
+ : littleEndianByteAt(LoadByteWidth, P.ByteOffset);
};
Optional<BaseIndexOffset> Base;
@@ -7449,10 +7449,10 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
if (!Allowed || !Fast)
return SDValue();
- SDValue NewLoad =
- DAG.getExtLoad(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD, SDLoc(N), VT,
- Chain, FirstLoad->getBasePtr(),
- FirstLoad->getPointerInfo(), MemVT, FirstLoad->getAlign());
+ SDValue NewLoad =
+ DAG.getExtLoad(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD, SDLoc(N), VT,
+ Chain, FirstLoad->getBasePtr(),
+ FirstLoad->getPointerInfo(), MemVT, FirstLoad->getAlign());
// Transfer chain users from old loads to the new load.
for (LoadSDNode *L : Loads)
@@ -7622,9 +7622,9 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
if (N0.hasOneUse()) {
// FIXME Can we handle multiple uses? Could we token factor the chain
// results from the new/old setcc?
- SDValue SetCC =
- DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC,
- N0.getOperand(0), N0Opcode == ISD::STRICT_FSETCCS);
+ SDValue SetCC =
+ DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC,
+ N0.getOperand(0), N0Opcode == ISD::STRICT_FSETCCS);
CombineTo(N, SetCC);
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), SetCC.getValue(1));
recursivelyDeleteUnusedNodes(N0.getNode());
@@ -7725,9 +7725,9 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) {
SDValue A0 = A.getOperand(0), A1 = A.getOperand(1);
SDValue S0 = S.getOperand(0);
- if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0))
+ if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0))
if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1)))
- if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
+ if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
return DAG.getNode(ISD::ABS, DL, VT, S0);
}
}
@@ -8263,9 +8263,9 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
if (N0.getOpcode() == ISD::VSCALE)
if (ConstantSDNode *NC1 = isConstOrConstSplat(N->getOperand(1))) {
- const APInt &C0 = N0.getConstantOperandAPInt(0);
- const APInt &C1 = NC1->getAPIntValue();
- return DAG.getVScale(SDLoc(N), VT, C0 << C1);
+ const APInt &C0 = N0.getConstantOperandAPInt(0);
+ const APInt &C1 = NC1->getAPIntValue();
+ return DAG.getVScale(SDLoc(N), VT, C0 << C1);
}
return SDValue();
@@ -8331,10 +8331,10 @@ static SDValue combineShiftToMULH(SDNode *N, SelectionDAG &DAG,
// we use mulhs. Otherwise, zero extends (zext) use mulhu.
unsigned MulhOpcode = IsSignExt ? ISD::MULHS : ISD::MULHU;
- // Combine to mulh if mulh is legal/custom for the narrow type on the target.
- if (!TLI.isOperationLegalOrCustom(MulhOpcode, NarrowVT))
- return SDValue();
-
+ // Combine to mulh if mulh is legal/custom for the narrow type on the target.
+ if (!TLI.isOperationLegalOrCustom(MulhOpcode, NarrowVT))
+ return SDValue();
+
SDValue Result = DAG.getNode(MulhOpcode, DL, NarrowVT, LeftOp.getOperand(0),
RightOp.getOperand(0));
return (N->getOpcode() == ISD::SRA ? DAG.getSExtOrTrunc(Result, DL, WideVT1)
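
// Scalar model of combineShiftToMULH above: when a widening multiply is only
// used shifted down by the narrow width, just the high half is needed, which
// is MULHS for sign-extended and MULHU for zero-extended operands.
#include <cstdint>

int32_t mulhs32(int32_t a, int32_t b) {    // (sra (mul (sext a), (sext b)), 32)
  return int32_t((int64_t(a) * int64_t(b)) >> 32);
}
uint32_t mulhu32(uint32_t a, uint32_t b) { // (srl (mul (zext a), (zext b)), 32)
  return uint32_t((uint64_t(a) * uint64_t(b)) >> 32);
}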
@@ -8836,8 +8836,8 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
RHS->getAddressSpace(), NewAlign,
RHS->getMemOperand()->getFlags(), &Fast) &&
Fast) {
- SDValue NewPtr = DAG.getMemBasePlusOffset(
- RHS->getBasePtr(), TypeSize::Fixed(PtrOff), DL);
+ SDValue NewPtr = DAG.getMemBasePlusOffset(
+ RHS->getBasePtr(), TypeSize::Fixed(PtrOff), DL);
AddToWorklist(NewPtr.getNode());
SDValue Load = DAG.getLoad(
VT, DL, RHS->getChain(), NewPtr,
@@ -9434,75 +9434,75 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
}
-bool refineUniformBase(SDValue &BasePtr, SDValue &Index, SelectionDAG &DAG) {
- if (!isNullConstant(BasePtr) || Index.getOpcode() != ISD::ADD)
- return false;
-
- // For now we check only the LHS of the add.
- SDValue LHS = Index.getOperand(0);
- SDValue SplatVal = DAG.getSplatValue(LHS);
- if (!SplatVal)
- return false;
-
- BasePtr = SplatVal;
- Index = Index.getOperand(1);
- return true;
-}
-
-// Fold sext/zext of index into index type.
-bool refineIndexType(MaskedGatherScatterSDNode *MGS, SDValue &Index,
- bool Scaled, SelectionDAG &DAG) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
- if (Index.getOpcode() == ISD::ZERO_EXTEND) {
- SDValue Op = Index.getOperand(0);
- MGS->setIndexType(Scaled ? ISD::UNSIGNED_SCALED : ISD::UNSIGNED_UNSCALED);
- if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
- Index = Op;
- return true;
- }
- }
-
- if (Index.getOpcode() == ISD::SIGN_EXTEND) {
- SDValue Op = Index.getOperand(0);
- MGS->setIndexType(Scaled ? ISD::SIGNED_SCALED : ISD::SIGNED_UNSCALED);
- if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
- Index = Op;
- return true;
- }
- }
-
- return false;
-}
-
+bool refineUniformBase(SDValue &BasePtr, SDValue &Index, SelectionDAG &DAG) {
+ if (!isNullConstant(BasePtr) || Index.getOpcode() != ISD::ADD)
+ return false;
+
+ // For now we check only the LHS of the add.
+ SDValue LHS = Index.getOperand(0);
+ SDValue SplatVal = DAG.getSplatValue(LHS);
+ if (!SplatVal)
+ return false;
+
+ BasePtr = SplatVal;
+ Index = Index.getOperand(1);
+ return true;
+}
+
+// Fold sext/zext of index into index type.
+bool refineIndexType(MaskedGatherScatterSDNode *MGS, SDValue &Index,
+ bool Scaled, SelectionDAG &DAG) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ if (Index.getOpcode() == ISD::ZERO_EXTEND) {
+ SDValue Op = Index.getOperand(0);
+ MGS->setIndexType(Scaled ? ISD::UNSIGNED_SCALED : ISD::UNSIGNED_UNSCALED);
+ if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
+ Index = Op;
+ return true;
+ }
+ }
+
+ if (Index.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Op = Index.getOperand(0);
+ MGS->setIndexType(Scaled ? ISD::SIGNED_SCALED : ISD::SIGNED_UNSCALED);
+ if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
+ Index = Op;
+ return true;
+ }
+ }
+
+ return false;
+}
+
SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
SDValue Mask = MSC->getMask();
SDValue Chain = MSC->getChain();
- SDValue Index = MSC->getIndex();
- SDValue Scale = MSC->getScale();
- SDValue StoreVal = MSC->getValue();
- SDValue BasePtr = MSC->getBasePtr();
+ SDValue Index = MSC->getIndex();
+ SDValue Scale = MSC->getScale();
+ SDValue StoreVal = MSC->getValue();
+ SDValue BasePtr = MSC->getBasePtr();
SDLoc DL(N);
// Zap scatters with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return Chain;
- if (refineUniformBase(BasePtr, Index, DAG)) {
- SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedScatter(
- DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
- MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
- }
-
- if (refineIndexType(MSC, Index, MSC->isIndexScaled(), DAG)) {
- SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedScatter(
- DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
- MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
- }
-
+ if (refineUniformBase(BasePtr, Index, DAG)) {
+ SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
+ return DAG.getMaskedScatter(
+ DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
+ MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
+ }
+
+ if (refineIndexType(MSC, Index, MSC->isIndexScaled(), DAG)) {
+ SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
+ return DAG.getMaskedScatter(
+ DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
+ MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
+ }
+
return SDValue();
}
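
// Hedged scalar model of the scatter addressing that the two refinements
// above rewrite: lane i targets Base + Index[i] * Scale. With a null Base
// and an Index of the form splat(S) + Rest, the splat becomes the scalar
// base pointer; an extend wrapped around the index is dropped when the
// target can widen indices itself. Illustrative loop, not LLVM code.
#include <cstddef>
#include <cstdint>
#include <cstring>

void scatterModel(uint8_t *Base, const int64_t *Index, int64_t Scale,
                  const uint32_t *Val, const bool *Mask, size_t Lanes) {
  for (size_t i = 0; i != Lanes; ++i)
    if (Mask[i])   // an all-zero mask makes this a no-op ("zap" above)
      std::memcpy(Base + Index[i] * Scale, &Val[i], sizeof(uint32_t));
}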
@@ -9516,14 +9516,14 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return Chain;
- // If this is a masked store with an all-ones mask, we can use an unmasked store.
- // FIXME: Can we do this for indexed, compressing, or truncating stores?
- if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
- MST->isUnindexed() && !MST->isCompressingStore() &&
- !MST->isTruncatingStore())
- return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
- MST->getBasePtr(), MST->getMemOperand());
-
+ // If this is a masked store with an all-ones mask, we can use an unmasked store.
+ // FIXME: Can we do this for indexed, compressing, or truncating stores?
+ if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
+ MST->isUnindexed() && !MST->isCompressingStore() &&
+ !MST->isTruncatingStore())
+ return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
+ MST->getBasePtr(), MST->getMemOperand());
+
// Try transforming N to an indexed store.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
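
// Scalar model of the masked-store fold above: with every mask bit set the
// lane loop below is unconditional, i.e. a plain unmasked store, which is
// what the combiner emits instead.
#include <cstddef>
#include <cstdint>

void maskedStoreModel(uint32_t *Dst, const uint32_t *Val, const bool *Mask,
                      size_t Lanes) {
  for (size_t i = 0; i != Lanes; ++i)
    if (Mask[i])        // all-ones mask: always taken
      Dst[i] = Val[i];
}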
@@ -9534,33 +9534,33 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
SDValue DAGCombiner::visitMGATHER(SDNode *N) {
MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
SDValue Mask = MGT->getMask();
- SDValue Chain = MGT->getChain();
- SDValue Index = MGT->getIndex();
- SDValue Scale = MGT->getScale();
- SDValue PassThru = MGT->getPassThru();
- SDValue BasePtr = MGT->getBasePtr();
+ SDValue Chain = MGT->getChain();
+ SDValue Index = MGT->getIndex();
+ SDValue Scale = MGT->getScale();
+ SDValue PassThru = MGT->getPassThru();
+ SDValue BasePtr = MGT->getBasePtr();
SDLoc DL(N);
// Zap gathers with a zero mask.
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
- return CombineTo(N, PassThru, MGT->getChain());
-
- if (refineUniformBase(BasePtr, Index, DAG)) {
- SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
- PassThru.getValueType(), DL, Ops,
- MGT->getMemOperand(), MGT->getIndexType(),
- MGT->getExtensionType());
- }
-
- if (refineIndexType(MGT, Index, MGT->isIndexScaled(), DAG)) {
- SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
- PassThru.getValueType(), DL, Ops,
- MGT->getMemOperand(), MGT->getIndexType(),
- MGT->getExtensionType());
- }
-
+ return CombineTo(N, PassThru, MGT->getChain());
+
+ if (refineUniformBase(BasePtr, Index, DAG)) {
+ SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
+ return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
+ PassThru.getValueType(), DL, Ops,
+ MGT->getMemOperand(), MGT->getIndexType(),
+ MGT->getExtensionType());
+ }
+
+ if (refineIndexType(MGT, Index, MGT->isIndexScaled(), DAG)) {
+ SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
+ return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
+ PassThru.getValueType(), DL, Ops,
+ MGT->getMemOperand(), MGT->getIndexType(),
+ MGT->getExtensionType());
+ }
+
return SDValue();
}
@@ -9573,16 +9573,16 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return CombineTo(N, MLD->getPassThru(), MLD->getChain());
- // If this is a masked load with an all-ones mask, we can use an unmasked load.
- // FIXME: Can we do this for indexed, expanding, or extending loads?
- if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
- MLD->isUnindexed() && !MLD->isExpandingLoad() &&
- MLD->getExtensionType() == ISD::NON_EXTLOAD) {
- SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),
- MLD->getBasePtr(), MLD->getMemOperand());
- return CombineTo(N, NewLd, NewLd.getValue(1));
- }
-
+ // If this is a masked load with an all-ones mask, we can use an unmasked load.
+ // FIXME: Can we do this for indexed, expanding, or extending loads?
+ if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
+ MLD->isUnindexed() && !MLD->isExpandingLoad() &&
+ MLD->getExtensionType() == ISD::NON_EXTLOAD) {
+ SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),
+ MLD->getBasePtr(), MLD->getMemOperand());
+ return CombineTo(N, NewLd, NewLd.getValue(1));
+ }
+
// Try transforming N to an indexed load.
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
return SDValue(N, 0);
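
// Companion sketch for the masked-load cases above: an all-zero mask leaves
// only the pass-through operand (so the load is replaced by it), and an
// all-ones mask selects every lane from memory, i.e. an ordinary load.
#include <cstddef>
#include <cstdint>

void maskedLoadModel(uint32_t *Res, const uint32_t *Src,
                     const uint32_t *PassThru, const bool *Mask,
                     size_t Lanes) {
  for (size_t i = 0; i != Lanes; ++i)
    Res[i] = Mask[i] ? Src[i] : PassThru[i];
}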
@@ -9742,113 +9742,113 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
return DAG.getSelect(DL, N1.getValueType(), WideSetCC, N1, N2);
}
}
-
- // Match VSELECTs into add with unsigned saturation.
- if (hasOperation(ISD::UADDSAT, VT)) {
- // Check if one of the arms of the VSELECT is vector with all bits set.
- // If it's on the left side invert the predicate to simplify logic below.
- SDValue Other;
- ISD::CondCode SatCC = CC;
- if (ISD::isBuildVectorAllOnes(N1.getNode())) {
- Other = N2;
- SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
- } else if (ISD::isBuildVectorAllOnes(N2.getNode())) {
- Other = N1;
- }
-
- if (Other && Other.getOpcode() == ISD::ADD) {
- SDValue CondLHS = LHS, CondRHS = RHS;
- SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
-
- // Canonicalize condition operands.
- if (SatCC == ISD::SETUGE) {
- std::swap(CondLHS, CondRHS);
- SatCC = ISD::SETULE;
- }
-
- // We can test against either of the addition operands.
- // x <= x+y ? x+y : ~0 --> uaddsat x, y
- // x+y >= x ? x+y : ~0 --> uaddsat x, y
- if (SatCC == ISD::SETULE && Other == CondRHS &&
- (OpLHS == CondLHS || OpRHS == CondLHS))
- return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
-
- if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
- CondLHS == OpLHS) {
- // If the RHS is a constant we have to reverse the const
- // canonicalization.
- // x >= ~C ? x+C : ~0 --> uaddsat x, C
- auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
- return Cond->getAPIntValue() == ~Op->getAPIntValue();
- };
- if (SatCC == ISD::SETULE &&
- ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
- return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
- }
- }
- }
-
- // Match VSELECTs into sub with unsigned saturation.
- if (hasOperation(ISD::USUBSAT, VT)) {
- // Check if one of the arms of the VSELECT is a zero vector. If it's on
- // the left side invert the predicate to simplify logic below.
- SDValue Other;
- ISD::CondCode SatCC = CC;
- if (ISD::isBuildVectorAllZeros(N1.getNode())) {
- Other = N2;
- SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
- } else if (ISD::isBuildVectorAllZeros(N2.getNode())) {
- Other = N1;
- }
-
- if (Other && Other.getNumOperands() == 2 && Other.getOperand(0) == LHS) {
- SDValue CondRHS = RHS;
- SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
-
- // Look for a general sub with unsigned saturation first.
- // x >= y ? x-y : 0 --> usubsat x, y
- // x > y ? x-y : 0 --> usubsat x, y
- if ((SatCC == ISD::SETUGE || SatCC == ISD::SETUGT) &&
- Other.getOpcode() == ISD::SUB && OpRHS == CondRHS)
- return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
-
- if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
- if (isa<BuildVectorSDNode>(CondRHS)) {
- // If the RHS is a constant we have to reverse the const
- // canonicalization.
- // x > C-1 ? x+-C : 0 --> usubsat x, C
- auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
- return (!Op && !Cond) ||
- (Op && Cond &&
- Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
- };
- if (SatCC == ISD::SETUGT && Other.getOpcode() == ISD::ADD &&
- ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
- /*AllowUndefs*/ true)) {
- OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
- OpRHS);
- return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
- }
-
- // Another special case: If C was a sign bit, the sub has been
- // canonicalized into a xor.
- // FIXME: Would it be better to use computeKnownBits to determine
- // whether it's safe to decanonicalize the xor?
- // x s< 0 ? x^C : 0 --> usubsat x, C
- if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
- if (SatCC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
- ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
- OpRHSConst->getAPIntValue().isSignMask()) {
- // Note that we have to rebuild the RHS constant here to ensure
- // we don't rely on particular values of undef lanes.
- OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
- return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
- }
- }
- }
- }
- }
- }
+
+ // Match VSELECTs into add with unsigned saturation.
+ if (hasOperation(ISD::UADDSAT, VT)) {
+ // Check if one of the arms of the VSELECT is a vector with all bits set.
+ // If it's on the left side invert the predicate to simplify logic below.
+ SDValue Other;
+ ISD::CondCode SatCC = CC;
+ if (ISD::isBuildVectorAllOnes(N1.getNode())) {
+ Other = N2;
+ SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
+ } else if (ISD::isBuildVectorAllOnes(N2.getNode())) {
+ Other = N1;
+ }
+
+ if (Other && Other.getOpcode() == ISD::ADD) {
+ SDValue CondLHS = LHS, CondRHS = RHS;
+ SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
+
+ // Canonicalize condition operands.
+ if (SatCC == ISD::SETUGE) {
+ std::swap(CondLHS, CondRHS);
+ SatCC = ISD::SETULE;
+ }
+
+ // We can test against either of the addition operands.
+ // x <= x+y ? x+y : ~0 --> uaddsat x, y
+ // x+y >= x ? x+y : ~0 --> uaddsat x, y
+ if (SatCC == ISD::SETULE && Other == CondRHS &&
+ (OpLHS == CondLHS || OpRHS == CondLHS))
+ return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
+
+ if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
+ CondLHS == OpLHS) {
+ // If the RHS is a constant we have to reverse the const
+ // canonicalization.
+ // x >= ~C ? x+C : ~0 --> uaddsat x, C
+ auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+ return Cond->getAPIntValue() == ~Op->getAPIntValue();
+ };
+ if (SatCC == ISD::SETULE &&
+ ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
+ return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
+ }
+ }
+ }
+
+ // Match VSELECTs into sub with unsigned saturation.
+ if (hasOperation(ISD::USUBSAT, VT)) {
+ // Check if one of the arms of the VSELECT is a zero vector. If it's on
+ // the left side invert the predicate to simplify logic below.
+ SDValue Other;
+ ISD::CondCode SatCC = CC;
+ if (ISD::isBuildVectorAllZeros(N1.getNode())) {
+ Other = N2;
+ SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
+ } else if (ISD::isBuildVectorAllZeros(N2.getNode())) {
+ Other = N1;
+ }
+
+ if (Other && Other.getNumOperands() == 2 && Other.getOperand(0) == LHS) {
+ SDValue CondRHS = RHS;
+ SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
+
+ // Look for a general sub with unsigned saturation first.
+ // x >= y ? x-y : 0 --> usubsat x, y
+ // x > y ? x-y : 0 --> usubsat x, y
+ if ((SatCC == ISD::SETUGE || SatCC == ISD::SETUGT) &&
+ Other.getOpcode() == ISD::SUB && OpRHS == CondRHS)
+ return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
+
+ if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
+ if (isa<BuildVectorSDNode>(CondRHS)) {
+ // If the RHS is a constant we have to reverse the const
+ // canonicalization.
+ // x > C-1 ? x+-C : 0 --> usubsat x, C
+ auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+ return (!Op && !Cond) ||
+ (Op && Cond &&
+ Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
+ };
+ if (SatCC == ISD::SETUGT && Other.getOpcode() == ISD::ADD &&
+ ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
+ /*AllowUndefs*/ true)) {
+ OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
+ OpRHS);
+ return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
+ }
+
+ // Another special case: If C was a sign bit, the sub has been
+ // canonicalized into a xor.
+ // FIXME: Would it be better to use computeKnownBits to determine
+ // whether it's safe to decanonicalize the xor?
+ // x s< 0 ? x^C : 0 --> usubsat x, C
+ if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
+ if (SatCC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
+ ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
+ OpRHSConst->getAPIntValue().isSignMask()) {
+ // Note that we have to rebuild the RHS constant here to ensure
+ // we don't rely on particular values of undef lanes.
+ OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
+ return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
+ }
+ }
+ }
+ }
+ }
+ }
}
if (SimplifySelectOps(N, N1, N2))
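
// Hedged scalar models of the two saturating selects matched above: the
// uaddsat arm keys off unsigned-add overflow (x+y < x iff the add wrapped)
// and the usubsat arm off unsigned-sub underflow.
#include <cstdint>

uint32_t uaddsatModel(uint32_t x, uint32_t y) {
  uint32_t s = x + y;
  return s >= x ? s : UINT32_MAX;   // x+y >= x ? x+y : ~0 --> uaddsat x, y
}
uint32_t usubsatModel(uint32_t x, uint32_t y) {
  return x >= y ? x - y : 0;        // x >= y ? x-y : 0 --> usubsat x, y
}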
@@ -10207,14 +10207,14 @@ SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
SDValue BasePtr = LN0->getBasePtr();
for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
const unsigned Offset = Idx * Stride;
- const Align Align = commonAlignment(LN0->getAlign(), Offset);
+ const Align Align = commonAlignment(LN0->getAlign(), Offset);
SDValue SplitLoad = DAG.getExtLoad(
ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
- BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(Stride), DL);
+ BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(Stride), DL);
Loads.push_back(SplitLoad.getValue(0));
Chains.push_back(SplitLoad.getValue(1));
@@ -10631,7 +10631,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
- EVT N00VT = N00.getValueType();
+ EVT N00VT = N00.getValueType();
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
// Only do this before legalize for now.
@@ -10725,29 +10725,29 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
}
- // fold sext (not i1 X) -> add (zext i1 X), -1
- // TODO: This could be extended to handle bool vectors.
- if (N0.getValueType() == MVT::i1 && isBitwiseNot(N0) && N0.hasOneUse() &&
- (!LegalOperations || (TLI.isOperationLegal(ISD::ZERO_EXTEND, VT) &&
- TLI.isOperationLegal(ISD::ADD, VT)))) {
- // If we can eliminate the 'not', the sext form should be better
- if (SDValue NewXor = visitXOR(N0.getNode())) {
- // Returning N0 is a form of in-visit replacement that may have
- // invalidated N0.
- if (NewXor.getNode() == N0.getNode()) {
- // Return SDValue here as the xor should have already been replaced in
- // this sext.
- return SDValue();
- } else {
- // Return a new sext with the new xor.
- return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewXor);
- }
- }
-
- SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
- return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
- }
-
+ // fold sext (not i1 X) -> add (zext i1 X), -1
+ // TODO: This could be extended to handle bool vectors.
+ if (N0.getValueType() == MVT::i1 && isBitwiseNot(N0) && N0.hasOneUse() &&
+ (!LegalOperations || (TLI.isOperationLegal(ISD::ZERO_EXTEND, VT) &&
+ TLI.isOperationLegal(ISD::ADD, VT)))) {
+ // If we can eliminate the 'not', the sext form should be better
+ if (SDValue NewXor = visitXOR(N0.getNode())) {
+ // Returning N0 is a form of in-visit replacement that may have
+ // invalidated N0.
+ if (NewXor.getNode() == N0.getNode()) {
+ // Return SDValue here as the xor should have already been replaced in
+ // this sext.
+ return SDValue();
+ } else {
+ // Return a new sext with the new xor.
+ return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewXor);
+ }
+ }
+
+ SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
+ return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
+ }
+
return SDValue();
}
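
// Truth-table sketch of the i1 fold above: sext(not X) and zext(X) - 1
// agree for both boolean values (0 -> -1, 1 -> 0), which is why the sext of
// an eliminable 'not' is rebuilt as a zext plus all-ones.
#include <cstdint>

int32_t sextOfNot(bool X) { return X ? 0 : -1; }   // sext (not i1 X)
int32_t zextPlusMinusOne(bool X) {
  return int32_t(uint32_t(X)) - 1;                 // add (zext i1 X), -1
}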
@@ -11015,16 +11015,16 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
N0.getValueType());
}
- // zext(setcc x,y,cc) -> zext(select x, y, true, false, cc)
+ // zext(setcc x,y,cc) -> zext(select x, y, true, false, cc)
SDLoc DL(N);
- EVT N0VT = N0.getValueType();
- EVT N00VT = N0.getOperand(0).getValueType();
+ EVT N0VT = N0.getValueType();
+ EVT N00VT = N0.getOperand(0).getValueType();
if (SDValue SCC = SimplifySelectCC(
- DL, N0.getOperand(0), N0.getOperand(1),
- DAG.getBoolConstant(true, DL, N0VT, N00VT),
- DAG.getBoolConstant(false, DL, N0VT, N00VT),
+ DL, N0.getOperand(0), N0.getOperand(1),
+ DAG.getBoolConstant(true, DL, N0VT, N00VT),
+ DAG.getBoolConstant(false, DL, N0VT, N00VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
- return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, SCC);
+ return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, SCC);
}
// (zext (shl (zext x), cst)) -> (shl (zext x), cst)
@@ -11113,26 +11113,26 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
// fold (aext (load x)) -> (aext (truncate (extload x)))
// None of the supported targets knows how to perform load and any_ext
- // on vectors in one instruction, so attempt to fold to zext instead.
- if (VT.isVector()) {
- // Try to simplify (zext (load x)).
- if (SDValue foldedExt =
- tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
- ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
- return foldedExt;
- } else if (ISD::isNON_EXTLoad(N0.getNode()) &&
- ISD::isUNINDEXEDLoad(N0.getNode()) &&
- TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
+ // on vectors in one instruction, so attempt to fold to zext instead.
+ if (VT.isVector()) {
+ // Try to simplify (zext (load x)).
+ if (SDValue foldedExt =
+ tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
+ ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
+ return foldedExt;
+ } else if (ISD::isNON_EXTLoad(N0.getNode()) &&
+ ISD::isUNINDEXEDLoad(N0.getNode()) &&
+ TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
bool DoXform = true;
- SmallVector<SDNode *, 4> SetCCs;
+ SmallVector<SDNode *, 4> SetCCs;
if (!N0.hasOneUse())
- DoXform =
- ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
+ DoXform =
+ ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
- LN0->getChain(), LN0->getBasePtr(),
- N0.getValueType(), LN0->getMemOperand());
+ LN0->getChain(), LN0->getBasePtr(),
+ N0.getValueType(), LN0->getMemOperand());
ExtendSetCCUses(SetCCs, N0, ExtLoad, ISD::ANY_EXTEND);
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = N0.hasOneUse();
@@ -11141,8 +11141,8 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
recursivelyDeleteUnusedNodes(LN0);
} else {
- SDValue Trunc =
- DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
+ SDValue Trunc =
+ DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
}
return SDValue(N, 0); // Return N so it doesn't get rechecked!
@@ -11347,12 +11347,12 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
return SDValue();
uint64_t ShiftAmt = N01->getZExtValue();
- uint64_t MemoryWidth = LN0->getMemoryVT().getScalarSizeInBits();
+ uint64_t MemoryWidth = LN0->getMemoryVT().getScalarSizeInBits();
if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
else
ExtVT = EVT::getIntegerVT(*DAG.getContext(),
- VT.getScalarSizeInBits() - ShiftAmt);
+ VT.getScalarSizeInBits() - ShiftAmt);
} else if (Opc == ISD::AND) {
// An AND with a constant mask is the same as a truncate + zero-extend.
auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
@@ -11379,12 +11379,12 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
SDValue SRL = N0;
if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
ShAmt = ConstShift->getZExtValue();
- unsigned EVTBits = ExtVT.getScalarSizeInBits();
+ unsigned EVTBits = ExtVT.getScalarSizeInBits();
// Is the shift amount a multiple of size of VT?
if ((ShAmt & (EVTBits-1)) == 0) {
N0 = N0.getOperand(0);
// Is the load width a multiple of size of VT?
- if ((N0.getScalarValueSizeInBits() & (EVTBits - 1)) != 0)
+ if ((N0.getScalarValueSizeInBits() & (EVTBits - 1)) != 0)
return SDValue();
}
@@ -11414,7 +11414,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
ShiftMask.countTrailingOnes());
// If the mask is smaller, recompute the type.
- if ((ExtVT.getScalarSizeInBits() > MaskedVT.getScalarSizeInBits()) &&
+ if ((ExtVT.getScalarSizeInBits() > MaskedVT.getScalarSizeInBits()) &&
TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
ExtVT = MaskedVT;
}
@@ -11445,9 +11445,9 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
return SDValue();
auto AdjustBigEndianShift = [&](unsigned ShAmt) {
- unsigned LVTStoreBits =
- LN0->getMemoryVT().getStoreSizeInBits().getFixedSize();
- unsigned EVTStoreBits = ExtVT.getStoreSizeInBits().getFixedSize();
+ unsigned LVTStoreBits =
+ LN0->getMemoryVT().getStoreSizeInBits().getFixedSize();
+ unsigned EVTStoreBits = ExtVT.getStoreSizeInBits().getFixedSize();
return LVTStoreBits - EVTStoreBits - ShAmt;
};
@@ -11457,13 +11457,13 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
ShAmt = AdjustBigEndianShift(ShAmt);
uint64_t PtrOff = ShAmt / 8;
- Align NewAlign = commonAlignment(LN0->getAlign(), PtrOff);
+ Align NewAlign = commonAlignment(LN0->getAlign(), PtrOff);
SDLoc DL(LN0);
// The original load itself didn't wrap, so an offset within it doesn't.
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
- SDValue NewPtr = DAG.getMemBasePlusOffset(LN0->getBasePtr(),
- TypeSize::Fixed(PtrOff), DL, Flags);
+ SDValue NewPtr = DAG.getMemBasePlusOffset(LN0->getBasePtr(),
+ TypeSize::Fixed(PtrOff), DL, Flags);
AddToWorklist(NewPtr.getNode());
SDValue Load;
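
// Hedged model of the load narrowing above: taking bits [ShAmt, ShAmt+W) of
// a wide in-memory value is a W-bit load at byte offset ShAmt/8 on
// little-endian targets; big-endian targets mirror the offset exactly as the
// AdjustBigEndianShift lambda computes.
#include <cstdint>
#include <cstring>

uint16_t loadHigh16(const uint8_t *p) {   // (trunc (srl (load i32 p), 16))
  uint16_t v;
  std::memcpy(&v, p + 2, sizeof v);       // PtrOff = ShAmt / 8 = 2 (LE)
  return v;
}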
@@ -11485,13 +11485,13 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
SDValue Result = Load;
if (ShLeftAmt != 0) {
EVT ShImmTy = getShiftAmountTy(Result.getValueType());
- if (!isUIntN(ShImmTy.getScalarSizeInBits(), ShLeftAmt))
+ if (!isUIntN(ShImmTy.getScalarSizeInBits(), ShLeftAmt))
ShImmTy = VT;
// If the shift amount is as large as the result size (but, presumably,
// no larger than the source) then the useful bits of the result are
// zero; we can't simply return the shortened shift, because the result
// of that operation is undefined.
- if (ShLeftAmt >= VT.getScalarSizeInBits())
+ if (ShLeftAmt >= VT.getScalarSizeInBits())
Result = DAG.getConstant(0, DL, VT);
else
Result = DAG.getNode(ISD::SHL, DL, VT,
@@ -11641,41 +11641,41 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
- // fold (sext_inreg (masked_load x)) -> (sext_masked_load x)
- // ignore it if the masked load is already sign extended
- if (MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0)) {
- if (ExtVT == Ld->getMemoryVT() && N0.hasOneUse() &&
- Ld->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD &&
- TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT)) {
- SDValue ExtMaskedLoad = DAG.getMaskedLoad(
- VT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(), Ld->getOffset(),
- Ld->getMask(), Ld->getPassThru(), ExtVT, Ld->getMemOperand(),
- Ld->getAddressingMode(), ISD::SEXTLOAD, Ld->isExpandingLoad());
- CombineTo(N, ExtMaskedLoad);
- CombineTo(N0.getNode(), ExtMaskedLoad, ExtMaskedLoad.getValue(1));
- return SDValue(N, 0); // Return N so it doesn't get rechecked!
- }
- }
-
- // fold (sext_inreg (masked_gather x)) -> (sext_masked_gather x)
- if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
- if (SDValue(GN0, 0).hasOneUse() &&
- ExtVT == GN0->getMemoryVT() &&
- TLI.isVectorLoadExtDesirable(SDValue(SDValue(GN0, 0)))) {
- SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
- GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
-
- SDValue ExtLoad = DAG.getMaskedGather(
- DAG.getVTList(VT, MVT::Other), ExtVT, SDLoc(N), Ops,
- GN0->getMemOperand(), GN0->getIndexType(), ISD::SEXTLOAD);
-
- CombineTo(N, ExtLoad);
- CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
- AddToWorklist(ExtLoad.getNode());
- return SDValue(N, 0); // Return N so it doesn't get rechecked!
- }
- }
-
+ // fold (sext_inreg (masked_load x)) -> (sext_masked_load x)
+ // ignore it if the masked load is already sign extended
+ if (MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0)) {
+ if (ExtVT == Ld->getMemoryVT() && N0.hasOneUse() &&
+ Ld->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD &&
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT)) {
+ SDValue ExtMaskedLoad = DAG.getMaskedLoad(
+ VT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(), Ld->getOffset(),
+ Ld->getMask(), Ld->getPassThru(), ExtVT, Ld->getMemOperand(),
+ Ld->getAddressingMode(), ISD::SEXTLOAD, Ld->isExpandingLoad());
+ CombineTo(N, ExtMaskedLoad);
+ CombineTo(N0.getNode(), ExtMaskedLoad, ExtMaskedLoad.getValue(1));
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
+ }
+
+ // fold (sext_inreg (masked_gather x)) -> (sext_masked_gather x)
+ if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
+ if (SDValue(GN0, 0).hasOneUse() &&
+ ExtVT == GN0->getMemoryVT() &&
+ TLI.isVectorLoadExtDesirable(SDValue(SDValue(GN0, 0)))) {
+ SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
+ GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
+
+ SDValue ExtLoad = DAG.getMaskedGather(
+ DAG.getVTList(VT, MVT::Other), ExtVT, SDLoc(N), Ops,
+ GN0->getMemOperand(), GN0->getIndexType(), ISD::SEXTLOAD);
+
+ CombineTo(N, ExtLoad);
+ CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
+ AddToWorklist(ExtLoad.getNode());
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
+ }
+
// Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
if (ExtVTBits <= 16 && N0.getOpcode() == ISD::OR) {
if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
@@ -11776,11 +11776,11 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
EVT ExTy = N0.getValueType();
EVT TrTy = N->getValueType(0);
- auto EltCnt = VecTy.getVectorElementCount();
+ auto EltCnt = VecTy.getVectorElementCount();
unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
- auto NewEltCnt = EltCnt * SizeRatio;
+ auto NewEltCnt = EltCnt * SizeRatio;
- EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, NewEltCnt);
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, NewEltCnt);
assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
SDValue EltNo = N0->getOperand(1);
@@ -11894,7 +11894,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- if (LN0->isSimple() && LN0->getMemoryVT().bitsLT(VT)) {
+ if (LN0->isSimple() && LN0->getMemoryVT().bitsLT(VT)) {
SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
VT, LN0->getChain(), LN0->getBasePtr(),
LN0->getMemoryVT(),
@@ -11967,7 +11967,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
}
// Simplify the operands using demanded-bits information.
- if (SimplifyDemandedBits(SDValue(N, 0)))
+ if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
// (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
@@ -12194,7 +12194,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
*LN0->getMemOperand())) {
SDValue Load =
DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
- LN0->getPointerInfo(), LN0->getAlign(),
+ LN0->getPointerInfo(), LN0->getAlign(),
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
return Load;
@@ -12573,15 +12573,15 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
- return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
- N0.getOperand(1), N1);
+ return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
+ N0.getOperand(1), N1);
}
// fold (fadd x, (fmul y, z)) -> (fma y, z, x)
// Note: Commutes FADD operands.
if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
- return DAG.getNode(PreferredFusedOpcode, SL, VT, N1.getOperand(0),
- N1.getOperand(1), N0);
+ return DAG.getNode(PreferredFusedOpcode, SL, VT, N1.getOperand(0),
+ N1.getOperand(1), N0);
}
// fadd (fma A, B, (fmul C, D)), E --> fma A, B, (fma C, D, E)
@@ -12604,8 +12604,8 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
SDValue B = FMA.getOperand(1);
SDValue C = FMA.getOperand(2).getOperand(0);
SDValue D = FMA.getOperand(2).getOperand(1);
- SDValue CDE = DAG.getNode(PreferredFusedOpcode, SL, VT, C, D, E);
- return DAG.getNode(PreferredFusedOpcode, SL, VT, A, B, CDE);
+ SDValue CDE = DAG.getNode(PreferredFusedOpcode, SL, VT, C, D, E);
+ return DAG.getNode(PreferredFusedOpcode, SL, VT, A, B, CDE);
}
// Look through FP_EXTEND nodes to do more combining.
@@ -12617,9 +12617,9 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
- N1);
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
+ N1);
}
}
@@ -12631,9 +12631,9 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N10.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)),
- N0);
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)),
+ N0);
}
}
@@ -12641,13 +12641,13 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if (Aggressive) {
// fold (fadd (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), z))
- auto FoldFAddFMAFPExtFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
- SDValue Z) {
+ auto FoldFAddFMAFPExtFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
+ SDValue Z) {
return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
- Z));
+ Z));
};
if (N0.getOpcode() == PreferredFusedOpcode) {
SDValue N02 = N0.getOperand(2);
@@ -12658,7 +12658,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
N020.getValueType())) {
return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
N020.getOperand(0), N020.getOperand(1),
- N1);
+ N1);
}
}
}
@@ -12668,14 +12668,14 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
// FIXME: This turns two single-precision and one double-precision
// operation into two double-precision operations, which might not be
// interesting for all targets, especially GPUs.
- auto FoldFAddFPExtFMAFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
- SDValue Z) {
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT, DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, V), Z));
+ auto FoldFAddFPExtFMAFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
+ SDValue Z) {
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT, DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, V), Z));
};
if (N0.getOpcode() == ISD::FP_EXTEND) {
SDValue N00 = N0.getOperand(0);
@@ -12686,7 +12686,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
N00.getValueType())) {
return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
N002.getOperand(0), N002.getOperand(1),
- N1);
+ N1);
}
}
}
@@ -12702,7 +12702,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
N120.getValueType())) {
return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
N120.getOperand(0), N120.getOperand(1),
- N0);
+ N0);
}
}
}
@@ -12721,7 +12721,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
N10.getValueType())) {
return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
N102.getOperand(0), N102.getOperand(1),
- N0);
+ N0);
}
}
}
@@ -12779,7 +12779,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
auto tryToFoldXYSubZ = [&](SDValue XY, SDValue Z) {
if (isContractableFMUL(XY) && (Aggressive || XY->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT, XY.getOperand(0),
- XY.getOperand(1), DAG.getNode(ISD::FNEG, SL, VT, Z));
+ XY.getOperand(1), DAG.getNode(ISD::FNEG, SL, VT, Z));
}
return SDValue();
};
@@ -12790,7 +12790,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(YZ) && (Aggressive || YZ->hasOneUse())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, YZ.getOperand(0)),
- YZ.getOperand(1), X);
+ YZ.getOperand(1), X);
}
return SDValue();
};
@@ -12821,7 +12821,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
SDValue N01 = N0.getOperand(0).getOperand(1);
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
- DAG.getNode(ISD::FNEG, SL, VT, N1));
+ DAG.getNode(ISD::FNEG, SL, VT, N1));
}
// Look through FP_EXTEND nodes to do more combining.
@@ -12834,9 +12834,9 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
- DAG.getNode(ISD::FNEG, SL, VT, N1));
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
+ DAG.getNode(ISD::FNEG, SL, VT, N1));
}
}
@@ -12848,11 +12848,11 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(N10) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N10.getValueType())) {
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0))),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)), N0);
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0))),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)), N0);
}
}
@@ -12869,12 +12869,12 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(N000) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
- return DAG.getNode(
- ISD::FNEG, SL, VT,
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
- N1));
+ return DAG.getNode(
+ ISD::FNEG, SL, VT,
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
+ N1));
}
}
}
@@ -12892,12 +12892,12 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(N000) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N000.getValueType())) {
- return DAG.getNode(
- ISD::FNEG, SL, VT,
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
- N1));
+ return DAG.getNode(
+ ISD::FNEG, SL, VT,
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
+ N1));
}
}
}
@@ -12909,12 +12909,12 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (CanFuse && N0.getOpcode() == PreferredFusedOpcode &&
isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() &&
N0.getOperand(2)->hasOneUse()) {
- return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
- N0.getOperand(1),
+ return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
+ N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
- DAG.getNode(ISD::FNEG, SL, VT, N1)));
+ DAG.getNode(ISD::FNEG, SL, VT, N1)));
}
// fold (fsub x, (fma y, z, (fmul u, v)))
@@ -12924,11 +12924,11 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
N1->hasOneUse() && NoSignedZero) {
SDValue N20 = N1.getOperand(2).getOperand(0);
SDValue N21 = N1.getOperand(2).getOperand(1);
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT, N20), N21, N0));
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT, N20), N21, N0));
}
@@ -12942,13 +12942,13 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(N020) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N020.getValueType())) {
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT, N0.getOperand(0), N0.getOperand(1),
- DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(1)),
- DAG.getNode(ISD::FNEG, SL, VT, N1)));
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT, N0.getOperand(0), N0.getOperand(1),
+ DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(1)),
+ DAG.getNode(ISD::FNEG, SL, VT, N1)));
}
}
}
@@ -12966,15 +12966,15 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isContractableFMUL(N002) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
- DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(0)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(1)),
- DAG.getNode(ISD::FNEG, SL, VT, N1)));
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
+ DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(0)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(1)),
+ DAG.getNode(ISD::FNEG, SL, VT, N1)));
}
}
}
@@ -12990,13 +12990,13 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
N120.getValueType())) {
SDValue N1200 = N120.getOperand(0);
SDValue N1201 = N120.getOperand(1);
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N1200)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N1201), N0));
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N1200)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N1201), N0));
}
}
@@ -13017,15 +13017,15 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
CvtSrc.getValueType())) {
SDValue N1020 = N102.getOperand(0);
SDValue N1021 = N102.getOperand(1);
- return DAG.getNode(
- PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N100)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
- DAG.getNode(PreferredFusedOpcode, SL, VT,
- DAG.getNode(ISD::FNEG, SL, VT,
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N1020)),
- DAG.getNode(ISD::FP_EXTEND, SL, VT, N1021), N0));
+ return DAG.getNode(
+ PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N100)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
+ DAG.getNode(PreferredFusedOpcode, SL, VT,
+ DAG.getNode(ISD::FNEG, SL, VT,
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N1020)),
+ DAG.getNode(ISD::FP_EXTEND, SL, VT, N1021), N0));
}
}
}
@@ -13072,56 +13072,56 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
// fold (fmul (fadd x0, +1.0), y) -> (fma x0, y, y)
// fold (fmul (fadd x0, -1.0), y) -> (fma x0, y, (fneg y))
- auto FuseFADD = [&](SDValue X, SDValue Y) {
+ auto FuseFADD = [&](SDValue X, SDValue Y) {
if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) {
if (auto *C = isConstOrConstSplatFP(X.getOperand(1), true)) {
if (C->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
- Y);
+ Y);
if (C->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
- DAG.getNode(ISD::FNEG, SL, VT, Y));
+ DAG.getNode(ISD::FNEG, SL, VT, Y));
}
}
return SDValue();
};
- if (SDValue FMA = FuseFADD(N0, N1))
+ if (SDValue FMA = FuseFADD(N0, N1))
return FMA;
- if (SDValue FMA = FuseFADD(N1, N0))
+ if (SDValue FMA = FuseFADD(N1, N0))
return FMA;
// fold (fmul (fsub +1.0, x1), y) -> (fma (fneg x1), y, y)
// fold (fmul (fsub -1.0, x1), y) -> (fma (fneg x1), y, (fneg y))
// fold (fmul (fsub x0, +1.0), y) -> (fma x0, y, (fneg y))
// fold (fmul (fsub x0, -1.0), y) -> (fma x0, y, y)
- auto FuseFSUB = [&](SDValue X, SDValue Y) {
+ auto FuseFSUB = [&](SDValue X, SDValue Y) {
if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) {
if (auto *C0 = isConstOrConstSplatFP(X.getOperand(0), true)) {
if (C0->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
- Y);
+ Y);
if (C0->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
- DAG.getNode(ISD::FNEG, SL, VT, Y));
+ DAG.getNode(ISD::FNEG, SL, VT, Y));
}
if (auto *C1 = isConstOrConstSplatFP(X.getOperand(1), true)) {
if (C1->isExactlyValue(+1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
- DAG.getNode(ISD::FNEG, SL, VT, Y));
+ DAG.getNode(ISD::FNEG, SL, VT, Y));
if (C1->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
- Y);
+ Y);
}
}
return SDValue();
};
- if (SDValue FMA = FuseFSUB(N0, N1))
+ if (SDValue FMA = FuseFSUB(N0, N1))
return FMA;
- if (SDValue FMA = FuseFSUB(N1, N0))
+ if (SDValue FMA = FuseFSUB(N1, N0))
return FMA;
return SDValue();
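
// Model of the distributive folds above with std::fma standing in for the
// target's fused op: algebraically (x + 1) * y == x*y + y == fma(x, y, y)
// and (x - 1) * y == fma(x, y, -y). The fused form rounds once instead of
// twice, which is why the combiner only forms it when FMA fusion is allowed.
#include <cmath>

double fmulOfFAddOne(double x, double y) {
  return std::fma(x, y, y);    // (fmul (fadd x, +1.0), y) -> (fma x, y, y)
}
double fmulOfFSubOne(double x, double y) {
  return std::fma(x, y, -y);   // (fmul (fsub x, +1.0), y) -> (fma x, y, (fneg y))
}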
@@ -13130,13 +13130,13 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
- bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
+ bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
+ bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
- SDNodeFlags Flags = N->getFlags();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SDNodeFlags Flags = N->getFlags();
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
return R;
@@ -13148,11 +13148,11 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
// fold (fadd c1, c2) -> c1 + c2
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FADD, DL, VT, N0, N1);
+ return DAG.getNode(ISD::FADD, DL, VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
- return DAG.getNode(ISD::FADD, DL, VT, N1, N0);
+ return DAG.getNode(ISD::FADD, DL, VT, N1, N0);
// N0 + -0.0 --> N0 (also allowed with +0.0 and fast-math)
ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, true);
@@ -13167,13 +13167,13 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
if (SDValue NegN1 = TLI.getCheaperNegatedExpression(
N1, DAG, LegalOperations, ForCodeSize))
- return DAG.getNode(ISD::FSUB, DL, VT, N0, NegN1);
+ return DAG.getNode(ISD::FSUB, DL, VT, N0, NegN1);
// fold (fadd (fneg A), B) -> (fsub B, A)
if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
if (SDValue NegN0 = TLI.getCheaperNegatedExpression(
N0, DAG, LegalOperations, ForCodeSize))
- return DAG.getNode(ISD::FSUB, DL, VT, N1, NegN0);
+ return DAG.getNode(ISD::FSUB, DL, VT, N1, NegN0);
auto isFMulNegTwo = [](SDValue FMul) {
if (!FMul.hasOneUse() || FMul.getOpcode() != ISD::FMUL)
@@ -13185,14 +13185,14 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
// fadd (fmul B, -2.0), A --> fsub A, (fadd B, B)
if (isFMulNegTwo(N0)) {
SDValue B = N0.getOperand(0);
- SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
- return DAG.getNode(ISD::FSUB, DL, VT, N1, Add);
+ SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
+ return DAG.getNode(ISD::FSUB, DL, VT, N1, Add);
}
// fadd A, (fmul B, -2.0) --> fsub A, (fadd B, B)
if (isFMulNegTwo(N1)) {
SDValue B = N1.getOperand(0);
- SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
- return DAG.getNode(ISD::FSUB, DL, VT, N0, Add);
+ SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
+ return DAG.getNode(ISD::FSUB, DL, VT, N0, Add);
}
// No FP constant should be created after legalization as Instruction
@@ -13218,9 +13218,9 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
AllowNewConst) {
// fadd (fadd x, c1), c2 -> fadd x, c1 + c2
if (N1CFP && N0.getOpcode() == ISD::FADD &&
- DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
- SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1);
- return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC);
+ DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
+ SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1);
+ return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC);
}
// We can fold chains of FADD's of the same value into multiplications.
@@ -13228,14 +13228,14 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
// of rounding steps.
if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
if (N0.getOpcode() == ISD::FMUL) {
- bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
- bool CFP01 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
+ bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
+ bool CFP01 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
// (fadd (fmul x, c), x) -> (fmul x, c+1)
if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
- DAG.getConstantFP(1.0, DL, VT));
- return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP);
+ DAG.getConstantFP(1.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP);
}
// (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
@@ -13243,20 +13243,20 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
- DAG.getConstantFP(2.0, DL, VT));
- return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP);
+ DAG.getConstantFP(2.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP);
}
}
if (N1.getOpcode() == ISD::FMUL) {
- bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
- bool CFP11 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
+ bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
+ bool CFP11 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
// (fadd x, (fmul x, c)) -> (fmul x, c+1)
if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
- DAG.getConstantFP(1.0, DL, VT));
- return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP);
+ DAG.getConstantFP(1.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP);
}
// (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
@@ -13264,28 +13264,28 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N0.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
- DAG.getConstantFP(2.0, DL, VT));
- return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP);
+ DAG.getConstantFP(2.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP);
}
}
if (N0.getOpcode() == ISD::FADD) {
- bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
+ bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
// (fadd (fadd x, x), x) -> (fmul x, 3.0)
if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
(N0.getOperand(0) == N1)) {
- return DAG.getNode(ISD::FMUL, DL, VT, N1,
- DAG.getConstantFP(3.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N1,
+ DAG.getConstantFP(3.0, DL, VT));
}
}
if (N1.getOpcode() == ISD::FADD) {
- bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
+ bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
// (fadd x, (fadd x, x)) -> (fmul x, 3.0)
if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
N1.getOperand(0) == N0) {
- return DAG.getNode(ISD::FMUL, DL, VT, N0,
- DAG.getConstantFP(3.0, DL, VT));
+ return DAG.getNode(ISD::FMUL, DL, VT, N0,
+ DAG.getConstantFP(3.0, DL, VT));
}
}
@@ -13295,7 +13295,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0),
- DAG.getConstantFP(4.0, DL, VT));
+ DAG.getConstantFP(4.0, DL, VT));
}
}
} // enable-unsafe-fp-math
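
// Scalar sketch of the constant reassociations above, all gated on
// unsafe-fp-math/reassociation in the combiner because floating-point
// addition and multiplication are not associative under rounding.
double fmulPlusOne(double x, double c) {
  return x * (c + 1.0);   // (fadd (fmul x, c), x) -> (fmul x, c+1)
}
double timesThree(double x) {
  return x * 3.0;         // (fadd (fadd x, x), x) -> (fmul x, 3.0)
}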
@@ -13308,33 +13308,33 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return SDValue();
}
-SDValue DAGCombiner::visitSTRICT_FADD(SDNode *N) {
- SDValue Chain = N->getOperand(0);
- SDValue N0 = N->getOperand(1);
- SDValue N1 = N->getOperand(2);
- EVT VT = N->getValueType(0);
- EVT ChainVT = N->getValueType(1);
- SDLoc DL(N);
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
-
- // fold (strict_fadd A, (fneg B)) -> (strict_fsub A, B)
- if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
- if (SDValue NegN1 = TLI.getCheaperNegatedExpression(
- N1, DAG, LegalOperations, ForCodeSize)) {
- return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
- {Chain, N0, NegN1});
- }
-
- // fold (strict_fadd (fneg A), B) -> (strict_fsub B, A)
- if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
- if (SDValue NegN0 = TLI.getCheaperNegatedExpression(
- N0, DAG, LegalOperations, ForCodeSize)) {
- return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
- {Chain, N1, NegN0});
- }
- return SDValue();
-}
-
+SDValue DAGCombiner::visitSTRICT_FADD(SDNode *N) {
+ SDValue Chain = N->getOperand(0);
+ SDValue N0 = N->getOperand(1);
+ SDValue N1 = N->getOperand(2);
+ EVT VT = N->getValueType(0);
+ EVT ChainVT = N->getValueType(1);
+ SDLoc DL(N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+
+ // fold (strict_fadd A, (fneg B)) -> (strict_fsub A, B)
+ if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
+ if (SDValue NegN1 = TLI.getCheaperNegatedExpression(
+ N1, DAG, LegalOperations, ForCodeSize)) {
+ return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
+ {Chain, N0, NegN1});
+ }
+
+ // fold (strict_fadd (fneg A), B) -> (strict_fsub B, A)
+ if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
+ if (SDValue NegN0 = TLI.getCheaperNegatedExpression(
+ N0, DAG, LegalOperations, ForCodeSize)) {
+ return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
+ {Chain, N1, NegN0});
+ }
+ return SDValue();
+}
+
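A sketch of what the two folds above do to the node, using the operand order from the code (node names hypothetical):

    before: t: f32,ch = strict_fadd Chain, A, (fneg B)
    after:  t: f32,ch = strict_fsub Chain, A, B

Both the value and the output chain of the old node come from the new node, and IEEE-754 defines subtraction as addition of the negated operand, so the raised exceptions should be unchanged.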
SDValue DAGCombiner::visitFSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -13344,7 +13344,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = N->getFlags();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
return R;
@@ -13356,7 +13356,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
// fold (fsub c1, c2) -> c1-c2
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FSUB, DL, VT, N0, N1);
+ return DAG.getNode(ISD::FSUB, DL, VT, N0, N1);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
@@ -13379,18 +13379,18 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
if (N0CFP && N0CFP->isZero()) {
if (N0CFP->isNegative() ||
(Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
- // We cannot replace an FSUB(+-0.0,X) with FNEG(X) when denormals are
- // flushed to zero, unless all users treat denorms as zero (DAZ).
- // FIXME: This transform will change the sign of a NaN and the behavior
- // of a signaling NaN. It is only valid when a NoNaN flag is present.
- DenormalMode DenormMode = DAG.getDenormalMode(VT);
- if (DenormMode == DenormalMode::getIEEE()) {
- if (SDValue NegN1 =
- TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
- return NegN1;
- if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
- return DAG.getNode(ISD::FNEG, DL, VT, N1);
- }
+ // We cannot replace an FSUB(+-0.0,X) with FNEG(X) when denormals are
+ // flushed to zero, unless all users treat denorms as zero (DAZ).
+ // FIXME: This transform will change the sign of a NaN and the behavior
+ // of a signaling NaN. It is only valid when a NoNaN flag is present.
+ DenormalMode DenormMode = DAG.getDenormalMode(VT);
+ if (DenormMode == DenormalMode::getIEEE()) {
+ if (SDValue NegN1 =
+ TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
+ return NegN1;
+ if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
+ return DAG.getNode(ISD::FNEG, DL, VT, N1);
+ }
}
}
@@ -13399,16 +13399,16 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
N1.getOpcode() == ISD::FADD) {
// X - (X + Y) -> -Y
if (N0 == N1->getOperand(0))
- return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(1));
+ return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(1));
// X - (Y + X) -> -Y
if (N0 == N1->getOperand(1))
- return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(0));
+ return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(0));
}
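Both folds are the same cancellation, legal only under the FP flags checked just above this hunk: if N1 = X + Y, then X - (X + Y) = -Y, and likewise X - (Y + X) = -Y. Hypothetical values: X = 5, Y = 2 gives 5 - (5 + 2) = -2.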
// fold (fsub A, (fneg B)) -> (fadd A, B)
if (SDValue NegN1 =
TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
- return DAG.getNode(ISD::FADD, DL, VT, N0, NegN1);
+ return DAG.getNode(ISD::FADD, DL, VT, N0, NegN1);
// FSUB -> FMA combines:
if (SDValue Fused = visitFSUBForFMACombine(N)) {
@@ -13428,7 +13428,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
const SDNodeFlags Flags = N->getFlags();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
return R;
@@ -13442,28 +13442,28 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
// fold (fmul c1, c2) -> c1*c2
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FMUL, DL, VT, N0, N1);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, N1);
// canonicalize constant to RHS
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(N1))
- return DAG.getNode(ISD::FMUL, DL, VT, N1, N0);
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
+ !DAG.isConstantFPBuildVectorOrConstantFP(N1))
+ return DAG.getNode(ISD::FMUL, DL, VT, N1, N0);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
// fmul (fmul X, C1), C2 -> fmul X, C1 * C2
- if (DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
N0.getOpcode() == ISD::FMUL) {
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
// Avoid an infinite loop by making sure that N00 is not a constant
// (the inner multiply has not been constant folded yet).
- if (DAG.isConstantFPBuildVectorOrConstantFP(N01) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(N00)) {
- SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1);
- return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts);
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N01) &&
+ !DAG.isConstantFPBuildVectorOrConstantFP(N00)) {
+ SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1);
+ return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts);
}
}
@@ -13472,14 +13472,14 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
if (N0.getOpcode() == ISD::FADD && N0.hasOneUse() &&
N0.getOperand(0) == N0.getOperand(1)) {
const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
- SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1);
- return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts);
+ SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts);
}
}
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
- return DAG.getNode(ISD::FADD, DL, VT, N0, N0);
+ return DAG.getNode(ISD::FADD, DL, VT, N0, N0);
// fold (fmul X, -1.0) -> (fneg X)
if (N1CFP && N1CFP->isExactlyValue(-1.0))
@@ -13498,7 +13498,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
if (NegN0 && NegN1 &&
(CostN0 == TargetLowering::NegatibleCost::Cheaper ||
CostN1 == TargetLowering::NegatibleCost::Cheaper))
- return DAG.getNode(ISD::FMUL, DL, VT, NegN0, NegN1);
+ return DAG.getNode(ISD::FMUL, DL, VT, NegN0, NegN1);
// fold (fmul X, (select (fcmp X > 0.0), -1.0, 1.0)) -> (fneg (fabs X))
// fold (fmul X, (select (fcmp X > 0.0), 1.0, -1.0)) -> (fabs X)
@@ -13565,11 +13565,11 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
// FMA nodes have flags that propagate to the created nodes.
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
-
- bool UnsafeFPMath =
- Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ bool UnsafeFPMath =
+ Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
+
// Constant fold FMA.
if (isa<ConstantFPSDNode>(N0) &&
isa<ConstantFPSDNode>(N1) &&
@@ -13589,7 +13589,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (NegN0 && NegN1 &&
(CostN0 == TargetLowering::NegatibleCost::Cheaper ||
CostN1 == TargetLowering::NegatibleCost::Cheaper))
- return DAG.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2);
+ return DAG.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2);
if (UnsafeFPMath) {
if (N0CFP && N0CFP->isZero())
@@ -13597,32 +13597,32 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (N1CFP && N1CFP->isZero())
return N2;
}
-
+
if (N0CFP && N0CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
if (N1CFP && N1CFP->isExactlyValue(1.0))
return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
// Canonicalize (fma c, x, y) -> (fma x, c, y)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(N1))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
+ !DAG.isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
if (UnsafeFPMath) {
// (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
- DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
- DAG.isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
+ DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
+ DAG.isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
- DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1)));
+ DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1)));
}
// (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
if (N0.getOpcode() == ISD::FMUL &&
- DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
- DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
- return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
- DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1)),
+ DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
+ DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
+ return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
+ DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1)),
N2);
}
}
@@ -13645,23 +13645,23 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
(N1.hasOneUse() && !TLI.isFPImmLegal(N1CFP->getValueAPF(), VT,
ForCodeSize)))) {
return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
- DAG.getNode(ISD::FNEG, DL, VT, N1), N2);
+ DAG.getNode(ISD::FNEG, DL, VT, N1), N2);
}
}
if (UnsafeFPMath) {
// (fma x, c, x) -> (fmul x, (c+1))
if (N1CFP && N0 == N2) {
- return DAG.getNode(
- ISD::FMUL, DL, VT, N0,
- DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(1.0, DL, VT)));
+ return DAG.getNode(
+ ISD::FMUL, DL, VT, N0,
+ DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(1.0, DL, VT)));
}
// (fma x, c, (fneg x)) -> (fmul x, (c-1))
if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
- return DAG.getNode(
- ISD::FMUL, DL, VT, N0,
- DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(-1.0, DL, VT)));
+ return DAG.getNode(
+ ISD::FMUL, DL, VT, N0,
+ DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(-1.0, DL, VT)));
}
}
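Both folds above are one distribution step: fma(x, c, x) = x*c + x = x * (c + 1), and fma(x, c, -x) = x*c - x = x * (c - 1). Hypothetical check: x = 2, c = 3 gives fma(2, 3, 2) = 8 = 2 * 4.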
@@ -13670,7 +13670,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (!TLI.isFNegFree(VT))
if (SDValue Neg = TLI.getCheaperNegatedExpression(
SDValue(N, 0), DAG, LegalOperations, ForCodeSize))
- return DAG.getNode(ISD::FNEG, DL, VT, Neg);
+ return DAG.getNode(ISD::FNEG, DL, VT, Neg);
return SDValue();
}
@@ -13691,7 +13691,7 @@ SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
return SDValue();
// Skip if current node is a reciprocal/fneg-reciprocal.
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+ SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, /* AllowUndefs */ true);
if (N0CFP && (N0CFP->isExactlyValue(1.0) || N0CFP->isExactlyValue(-1.0)))
return SDValue();
@@ -13715,13 +13715,13 @@ SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
SetVector<SDNode *> Users;
for (auto *U : N1->uses()) {
if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
- // Skip X/sqrt(X) that has not been simplified to sqrt(X) yet.
- if (U->getOperand(1).getOpcode() == ISD::FSQRT &&
- U->getOperand(0) == U->getOperand(1).getOperand(0) &&
- U->getFlags().hasAllowReassociation() &&
- U->getFlags().hasNoSignedZeros())
- continue;
-
+ // Skip X/sqrt(X) that has not been simplified to sqrt(X) yet.
+ if (U->getOperand(1).getOpcode() == ISD::FSQRT &&
+ U->getOperand(0) == U->getOperand(1).getOperand(0) &&
+ U->getFlags().hasAllowReassociation() &&
+ U->getFlags().hasNoSignedZeros())
+ continue;
+
// This division is eligible for optimization only if global unsafe math
// is enabled or if this division allows reciprocal formation.
if (UnsafeMath || U->getFlags().hasAllowReciprocal())
@@ -13763,7 +13763,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
SDNodeFlags Flags = N->getFlags();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
return R;
@@ -13775,7 +13775,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
// fold (fdiv c1, c2) -> c1/c2
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);
+ return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
@@ -13800,29 +13800,29 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
TLI.isOperationLegal(ISD::ConstantFP, VT) ||
TLI.isFPImmLegal(Recip, VT, ForCodeSize)))
return DAG.getNode(ISD::FMUL, DL, VT, N0,
- DAG.getConstantFP(Recip, DL, VT));
+ DAG.getConstantFP(Recip, DL, VT));
}
// If this FDIV is part of a reciprocal square root, it may be folded
// into a target-specific square root estimate instruction.
if (N1.getOpcode() == ISD::FSQRT) {
if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags))
- return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
} else if (N1.getOpcode() == ISD::FP_EXTEND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
- if (SDValue RV =
- buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
+ if (SDValue RV =
+ buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
AddToWorklist(RV.getNode());
- return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FP_ROUND &&
N1.getOperand(0).getOpcode() == ISD::FSQRT) {
- if (SDValue RV =
- buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
+ if (SDValue RV =
+ buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
AddToWorklist(RV.getNode());
- return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
}
} else if (N1.getOpcode() == ISD::FMUL) {
// Look through an FMUL. Even though this won't remove the FDIV directly,
@@ -13837,34 +13837,34 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
}
if (Sqrt.getNode()) {
// If the other multiply operand is known positive, pull it into the
- // sqrt. That will eliminate the division if we convert to an estimate.
+ // sqrt. That will eliminate the division if we convert to an estimate.
if (Flags.hasAllowReassociation() && N1.hasOneUse() &&
- N1->getFlags().hasAllowReassociation() && Sqrt.hasOneUse()) {
- SDValue A;
- if (Y.getOpcode() == ISD::FABS && Y.hasOneUse())
- A = Y.getOperand(0);
- else if (Y == Sqrt.getOperand(0))
- A = Y;
- if (A) {
- // X / (fabs(A) * sqrt(Z)) --> X / sqrt(A*A*Z) --> X * rsqrt(A*A*Z)
- // X / (A * sqrt(A)) --> X / sqrt(A*A*A) --> X * rsqrt(A*A*A)
- SDValue AA = DAG.getNode(ISD::FMUL, DL, VT, A, A);
- SDValue AAZ =
- DAG.getNode(ISD::FMUL, DL, VT, AA, Sqrt.getOperand(0));
- if (SDValue Rsqrt = buildRsqrtEstimate(AAZ, Flags))
- return DAG.getNode(ISD::FMUL, DL, VT, N0, Rsqrt);
-
- // Estimate creation failed. Clean up speculatively created nodes.
- recursivelyDeleteUnusedNodes(AAZ.getNode());
- }
+ N1->getFlags().hasAllowReassociation() && Sqrt.hasOneUse()) {
+ SDValue A;
+ if (Y.getOpcode() == ISD::FABS && Y.hasOneUse())
+ A = Y.getOperand(0);
+ else if (Y == Sqrt.getOperand(0))
+ A = Y;
+ if (A) {
+ // X / (fabs(A) * sqrt(Z)) --> X / sqrt(A*A*Z) --> X * rsqrt(A*A*Z)
+ // X / (A * sqrt(A)) --> X / sqrt(A*A*A) --> X * rsqrt(A*A*A)
+ SDValue AA = DAG.getNode(ISD::FMUL, DL, VT, A, A);
+ SDValue AAZ =
+ DAG.getNode(ISD::FMUL, DL, VT, AA, Sqrt.getOperand(0));
+ if (SDValue Rsqrt = buildRsqrtEstimate(AAZ, Flags))
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, Rsqrt);
+
+ // Estimate creation failed. Clean up speculatively created nodes.
+ recursivelyDeleteUnusedNodes(AAZ.getNode());
+ }
}
// We found an FSQRT, so try to make this fold:
// X / (Y * sqrt(Z)) -> X * (rsqrt(Z) / Y)
if (SDValue Rsqrt = buildRsqrtEstimate(Sqrt.getOperand(0), Flags)) {
- SDValue Div = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, Rsqrt, Y);
+ SDValue Div = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, Rsqrt, Y);
AddToWorklist(Div.getNode());
- return DAG.getNode(ISD::FMUL, DL, VT, N0, Div);
+ return DAG.getNode(ISD::FMUL, DL, VT, N0, Div);
}
}
}
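The algebra behind the last fold: X / (Y * sqrt(Z)) = X * (1 / sqrt(Z)) / Y = X * (rsqrt(Z) / Y), so the division by the full denominator becomes a multiply and the remaining FDIV operates on the cheap estimate. Hypothetical check: X = 8, Y = 2, Z = 4 gives 8 / (2 * 2) = 2 and 8 * (0.5 / 2) = 2.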
@@ -13875,12 +13875,12 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
return RV;
}
- // Fold X/Sqrt(X) -> Sqrt(X)
- if ((Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
- (Options.UnsafeFPMath || Flags.hasAllowReassociation()))
- if (N1.getOpcode() == ISD::FSQRT && N0 == N1.getOperand(0))
- return N1;
-
+ // Fold X/Sqrt(X) -> Sqrt(X)
+ if ((Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
+ (Options.UnsafeFPMath || Flags.hasAllowReassociation()))
+ if (N1.getOpcode() == ISD::FSQRT && N0 == N1.getOperand(0))
+ return N1;
+
// (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
TargetLowering::NegatibleCost CostN0 =
TargetLowering::NegatibleCost::Expensive;
@@ -13893,7 +13893,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
if (NegN0 && NegN1 &&
(CostN0 == TargetLowering::NegatibleCost::Cheaper ||
CostN1 == TargetLowering::NegatibleCost::Cheaper))
- return DAG.getNode(ISD::FDIV, SDLoc(N), VT, NegN0, NegN1);
+ return DAG.getNode(ISD::FDIV, SDLoc(N), VT, NegN0, NegN1);
return SDValue();
}
@@ -13905,14 +13905,14 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
SDNodeFlags Flags = N->getFlags();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
return R;
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);
+ return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
@@ -13926,7 +13926,7 @@ SDValue DAGCombiner::visitFSQRT(SDNode *N) {
// Require 'ninf' flag since sqrt(+Inf) = +Inf, but the estimation goes as:
// sqrt(+Inf) == rsqrt(+Inf) * +Inf = 0 * +Inf = NaN
- if (!Flags.hasApproximateFuncs() ||
+ if (!Flags.hasApproximateFuncs() ||
(!Options.NoInfsFPMath && !Flags.hasNoInfs()))
return SDValue();
@@ -13935,10 +13935,10 @@ SDValue DAGCombiner::visitFSQRT(SDNode *N) {
return SDValue();
// FSQRT nodes have flags that propagate to the created nodes.
- // TODO: If this is N0/sqrt(N0), and we reach this node before trying to
- // transform the fdiv, we may produce a sub-optimal estimate sequence
- // because the reciprocal calculation may not have to filter out a
- // 0.0 input.
+ // TODO: If this is N0/sqrt(N0), and we reach this node before trying to
+ // transform the fdiv, we may produce a sub-optimal estimate sequence
+ // because the reciprocal calculation may not have to filter out a
+ // 0.0 input.
return buildSqrtEstimate(N0, Flags);
}
@@ -13962,8 +13962,8 @@ static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
- bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
+ bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
+ bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
EVT VT = N->getValueType(0);
if (N0CFP && N1CFP) // Constant fold
@@ -14010,7 +14010,7 @@ SDValue DAGCombiner::visitFPOW(SDNode *N) {
ConstantFPSDNode *ExponentC = isConstOrConstSplatFP(N->getOperand(1));
if (!ExponentC)
return SDValue();
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
// Try to convert x ** (1/3) into cube root.
// TODO: Handle the various flavors of long double.
@@ -14037,7 +14037,7 @@ SDValue DAGCombiner::visitFPOW(SDNode *N) {
DAG.getTargetLoweringInfo().isOperationExpand(ISD::FCBRT, VT)))
return SDValue();
- return DAG.getNode(ISD::FCBRT, SDLoc(N), VT, N->getOperand(0));
+ return DAG.getNode(ISD::FCBRT, SDLoc(N), VT, N->getOperand(0));
}
// Try to convert x ** (1/4) and x ** (3/4) into square roots.
@@ -14072,12 +14072,12 @@ SDValue DAGCombiner::visitFPOW(SDNode *N) {
// pow(X, 0.25) --> sqrt(sqrt(X))
SDLoc DL(N);
- SDValue Sqrt = DAG.getNode(ISD::FSQRT, DL, VT, N->getOperand(0));
- SDValue SqrtSqrt = DAG.getNode(ISD::FSQRT, DL, VT, Sqrt);
+ SDValue Sqrt = DAG.getNode(ISD::FSQRT, DL, VT, N->getOperand(0));
+ SDValue SqrtSqrt = DAG.getNode(ISD::FSQRT, DL, VT, Sqrt);
if (ExponentIs025)
return SqrtSqrt;
// pow(X, 0.75) --> sqrt(X) * sqrt(sqrt(X))
- return DAG.getNode(ISD::FMUL, DL, VT, Sqrt, SqrtSqrt);
+ return DAG.getNode(ISD::FMUL, DL, VT, Sqrt, SqrtSqrt);
}
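The identities used here: pow(X, 0.25) = sqrt(sqrt(X)) and pow(X, 0.75) = pow(X, 0.5) * pow(X, 0.25) = sqrt(X) * sqrt(sqrt(X)). Check with X = 16: sqrt(sqrt(16)) = 2 and sqrt(16) * 2 = 8 = pow(16, 0.75).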
return SDValue();
@@ -14260,7 +14260,7 @@ SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
return DAG.getUNDEF(VT);
// fold (fp_to_sint c1fp) -> c1
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
@@ -14275,7 +14275,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
return DAG.getUNDEF(VT);
// fold (fp_to_uint c1fp) -> c1
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
return FoldIntToFPToInt(N, DAG);
@@ -14347,7 +14347,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
return SDValue();
// fold (fp_extend c1fp) -> c1fp
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
// fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
@@ -14395,7 +14395,7 @@ SDValue DAGCombiner::visitFCEIL(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fceil c1) -> fceil(c1)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
return SDValue();
@@ -14406,7 +14406,7 @@ SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (ftrunc c1) -> ftrunc(c1)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
// fold ftrunc (known rounded int x) -> x
@@ -14430,7 +14430,7 @@ SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (ffloor c1) -> ffloor(c1)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
return SDValue();
@@ -14439,10 +14439,10 @@ SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
// Constant fold FNEG.
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
if (SDValue NegN0 =
@@ -14457,11 +14457,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
(DAG.getTarget().Options.NoSignedZerosFPMath ||
N->getFlags().hasNoSignedZeros()) && N0.hasOneUse()) {
return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0.getOperand(1),
- N0.getOperand(0));
+ N0.getOperand(0));
}
- if (SDValue Cast = foldSignChangeInBitcast(N))
- return Cast;
+ if (SDValue Cast = foldSignChangeInBitcast(N))
+ return Cast;
return SDValue();
}
@@ -14473,11 +14473,11 @@ static SDValue visitFMinMax(SelectionDAG &DAG, SDNode *N,
EVT VT = N->getValueType(0);
const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
- const SDNodeFlags Flags = N->getFlags();
- unsigned Opc = N->getOpcode();
- bool PropagatesNaN = Opc == ISD::FMINIMUM || Opc == ISD::FMAXIMUM;
- bool IsMin = Opc == ISD::FMINNUM || Opc == ISD::FMINIMUM;
- SelectionDAG::FlagInserter FlagsInserter(DAG, N);
+ const SDNodeFlags Flags = N->getFlags();
+ unsigned Opc = N->getOpcode();
+ bool PropagatesNaN = Opc == ISD::FMINIMUM || Opc == ISD::FMAXIMUM;
+ bool IsMin = Opc == ISD::FMINNUM || Opc == ISD::FMINIMUM;
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
if (N0CFP && N1CFP) {
const APFloat &C0 = N0CFP->getValueAPF();
@@ -14486,39 +14486,39 @@ static SDValue visitFMinMax(SelectionDAG &DAG, SDNode *N,
}
// Canonicalize to constant on RHS.
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
- !DAG.isConstantFPBuildVectorOrConstantFP(N1))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
+ !DAG.isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
- if (N1CFP) {
- const APFloat &AF = N1CFP->getValueAPF();
-
- // minnum(X, nan) -> X
- // maxnum(X, nan) -> X
- // minimum(X, nan) -> nan
- // maximum(X, nan) -> nan
- if (AF.isNaN())
- return PropagatesNaN ? N->getOperand(1) : N->getOperand(0);
-
- // In the following folds, inf can be replaced with the largest finite
- // float, if the ninf flag is set.
- if (AF.isInfinity() || (Flags.hasNoInfs() && AF.isLargest())) {
- // minnum(X, -inf) -> -inf
- // maxnum(X, +inf) -> +inf
- // minimum(X, -inf) -> -inf if nnan
- // maximum(X, +inf) -> +inf if nnan
- if (IsMin == AF.isNegative() && (!PropagatesNaN || Flags.hasNoNaNs()))
- return N->getOperand(1);
-
- // minnum(X, +inf) -> X if nnan
- // maxnum(X, -inf) -> X if nnan
- // minimum(X, +inf) -> X
- // maximum(X, -inf) -> X
- if (IsMin != AF.isNegative() && (PropagatesNaN || Flags.hasNoNaNs()))
- return N->getOperand(0);
- }
- }
-
+ if (N1CFP) {
+ const APFloat &AF = N1CFP->getValueAPF();
+
+ // minnum(X, nan) -> X
+ // maxnum(X, nan) -> X
+ // minimum(X, nan) -> nan
+ // maximum(X, nan) -> nan
+ if (AF.isNaN())
+ return PropagatesNaN ? N->getOperand(1) : N->getOperand(0);
+
+ // In the following folds, inf can be replaced with the largest finite
+ // float, if the ninf flag is set.
+ if (AF.isInfinity() || (Flags.hasNoInfs() && AF.isLargest())) {
+ // minnum(X, -inf) -> -inf
+ // maxnum(X, +inf) -> +inf
+ // minimum(X, -inf) -> -inf if nnan
+ // maximum(X, +inf) -> +inf if nnan
+ if (IsMin == AF.isNegative() && (!PropagatesNaN || Flags.hasNoNaNs()))
+ return N->getOperand(1);
+
+ // minnum(X, +inf) -> X if nnan
+ // maxnum(X, -inf) -> X if nnan
+ // minimum(X, +inf) -> X
+ // maximum(X, -inf) -> X
+ if (IsMin != AF.isNegative() && (PropagatesNaN || Flags.hasNoNaNs()))
+ return N->getOperand(0);
+ }
+ }
+
return SDValue();
}
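A compact view of the constant-RHS folds above, for any non-NaN X (entries marked nnan additionally require the no-NaNs flag, matching the PropagatesNaN / hasNoNaNs conditions in the code):

    op        RHS=NaN   RHS=+inf      RHS=-inf
    minnum    X         X (nnan)      -inf
    maxnum    X         +inf          X (nnan)
    minimum   NaN       X             -inf (nnan)
    maximum   NaN       +inf (nnan)   X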
@@ -14543,7 +14543,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fabs c1) -> fabs(c1)
- if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
+ if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
// fold (fabs (fabs x)) -> (fabs x)
@@ -14555,8 +14555,8 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
- if (SDValue Cast = foldSignChangeInBitcast(N))
- return Cast;
+ if (SDValue Cast = foldSignChangeInBitcast(N))
+ return Cast;
return SDValue();
}
@@ -14566,13 +14566,13 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
- // BRCOND(FREEZE(cond)) is equivalent to BRCOND(cond) (both are
- // nondeterministic jumps).
- if (N1->getOpcode() == ISD::FREEZE && N1.hasOneUse()) {
- return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain,
- N1->getOperand(0), N2);
- }
-
+ // BRCOND(FREEZE(cond)) is equivalent to BRCOND(cond) (both are
+ // nondeterministic jumps).
+ if (N1->getOpcode() == ISD::FREEZE && N1.hasOneUse()) {
+ return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain,
+ N1->getOperand(0), N2);
+ }
+
// If N is a constant we could fold this into a fallthrough or unconditional
// branch. However that doesn't happen very often in normal code, because
// Instcombine/SimplifyCFG should have handled the available opportunities.
@@ -14954,13 +14954,13 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
// Therefore, we have:
// t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
- auto *CN = cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
+ auto *CN = cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
const APInt &Offset0 = CN->getAPIntValue();
- const APInt &Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
- int X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
- int Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
- int X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
- int Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
+ const APInt &Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
+ int X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
+ int Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
+ int X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
+ int Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
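A worked instance with all signs positive (an ADD user, pre-increment, not swapped, so x0 = y0 = x1 = y1 = 1): if t1 = base + offset1 then base = t1 - offset1, and the other use t0 = base + offset0 becomes t0 = (offset0 - offset1) + t1, which is the formula above with unit coefficients; Opcode stays ISD::ADD since y0 * y1 > 0.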
@@ -15152,8 +15152,8 @@ SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
}
-static inline ElementCount numVectorEltsOrZero(EVT T) {
- return T.isVector() ? T.getVectorElementCount() : ElementCount::getFixed(0);
+static inline ElementCount numVectorEltsOrZero(EVT T) {
+ return T.isVector() ? T.getVectorElementCount() : ElementCount::getFixed(0);
}
bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {
@@ -15221,24 +15221,24 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
EVT STMemType = ST->getMemoryVT();
EVT STType = ST->getValue().getValueType();
- // There are two cases to consider here:
- // 1. The store is fixed width and the load is scalable. In this case we
- // don't know at compile time if the store completely envelops the load
- // so we abandon the optimisation.
- // 2. The store is scalable and the load is fixed width. We could
- // potentially support a limited number of cases here, but there has been
- // no cost-benefit analysis to prove it's worth it.
- bool LdStScalable = LDMemType.isScalableVector();
- if (LdStScalable != STMemType.isScalableVector())
- return SDValue();
-
- // If we are dealing with scalable vectors on a big endian platform the
- // calculation of offsets below becomes trickier, since we do not know at
- // compile time the absolute size of the vector. Until we've done more
- // analysis on big-endian platforms it seems better to bail out for now.
- if (LdStScalable && DAG.getDataLayout().isBigEndian())
- return SDValue();
-
+ // There are two cases to consider here:
+ // 1. The store is fixed width and the load is scalable. In this case we
+ // don't know at compile time if the store completely envelops the load
+ // so we abandon the optimisation.
+ // 2. The store is scalable and the load is fixed width. We could
+ // potentially support a limited number of cases here, but there has been
+ // no cost-benefit analysis to prove it's worth it.
+ bool LdStScalable = LDMemType.isScalableVector();
+ if (LdStScalable != STMemType.isScalableVector())
+ return SDValue();
+
+ // If we are dealing with scalable vectors on a big endian platform the
+ // calculation of offsets below becomes trickier, since we do not know at
+ // compile time the absolute size of the vector. Until we've done more
+ // analysis on big-endian platforms it seems better to bail out for now.
+ if (LdStScalable && DAG.getDataLayout().isBigEndian())
+ return SDValue();
+
BaseIndexOffset BasePtrLD = BaseIndexOffset::match(LD, DAG);
BaseIndexOffset BasePtrST = BaseIndexOffset::match(ST, DAG);
int64_t Offset;
@@ -15250,22 +15250,22 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
// the stored value). With Offset=n (for n > 0) the loaded value starts at the
// n-th least significant byte of the stored value.
if (DAG.getDataLayout().isBigEndian())
- Offset = ((int64_t)STMemType.getStoreSizeInBits().getFixedSize() -
- (int64_t)LDMemType.getStoreSizeInBits().getFixedSize()) /
- 8 -
- Offset;
+ Offset = ((int64_t)STMemType.getStoreSizeInBits().getFixedSize() -
+ (int64_t)LDMemType.getStoreSizeInBits().getFixedSize()) /
+ 8 -
+ Offset;
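Example of the adjustment: a 64-bit store forwarded to a 16-bit load with Offset = 2 becomes Offset = (64 - 16) / 8 - 2 = 4 on a big-endian target, so the offset is recounted from the most-significant end and the load still reads the same bits of the stored value.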
// Check that the stored value covers all bits that are loaded.
- bool STCoversLD;
-
- TypeSize LdMemSize = LDMemType.getSizeInBits();
- TypeSize StMemSize = STMemType.getSizeInBits();
- if (LdStScalable)
- STCoversLD = (Offset == 0) && LdMemSize == StMemSize;
- else
- STCoversLD = (Offset >= 0) && (Offset * 8 + LdMemSize.getFixedSize() <=
- StMemSize.getFixedSize());
-
+ bool STCoversLD;
+
+ TypeSize LdMemSize = LDMemType.getSizeInBits();
+ TypeSize StMemSize = STMemType.getSizeInBits();
+ if (LdStScalable)
+ STCoversLD = (Offset == 0) && LdMemSize == StMemSize;
+ else
+ STCoversLD = (Offset >= 0) && (Offset * 8 + LdMemSize.getFixedSize() <=
+ StMemSize.getFixedSize());
+
auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
if (LD->isIndexed()) {
// Cannot handle opaque target constants and we must respect the user's
@@ -15285,15 +15285,15 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
// Memory as copy space (potentially masked).
if (Offset == 0 && LDType == STType && STMemType == LDMemType) {
// Simple case: Direct non-truncating forwarding
- if (LDType.getSizeInBits() == LdMemSize)
+ if (LDType.getSizeInBits() == LdMemSize)
return ReplaceLd(LD, ST->getValue(), Chain);
// Can we model the truncate and extension with an AND mask?
if (STType.isInteger() && LDMemType.isInteger() && !STType.isVector() &&
!LDMemType.isVector() && LD->getExtensionType() != ISD::SEXTLOAD) {
// Mask to size of LDMemType
auto Mask =
- DAG.getConstant(APInt::getLowBitsSet(STType.getFixedSizeInBits(),
- StMemSize.getFixedSize()),
+ DAG.getConstant(APInt::getLowBitsSet(STType.getFixedSizeInBits(),
+ StMemSize.getFixedSize()),
SDLoc(ST), STType);
auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
return ReplaceLd(LD, Val, Chain);
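A minimal host-side sketch of that masking step, with hypothetical sizes (a 32-bit stored value forwarded to a 16-bit zero-extending load):

    #include <cassert>
    #include <cstdint>
    int main() {
      uint32_t stored = 0xAABBCCDD;    // ST->getValue()
      uint32_t mask = (1u << 16) - 1;  // APInt::getLowBitsSet(32, 16)
      uint32_t val = stored & mask;    // the ISD::AND built above
      assert(val == 0x0000CCDD);       // what the zext 16-bit load observes
    }

Sign-extending loads are excluded above precisely because a plain AND cannot reproduce sign extension.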
@@ -16124,7 +16124,7 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
SDValue Ptr = St->getBasePtr();
if (StOffset) {
SDLoc DL(IVal);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(StOffset), DL);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(StOffset), DL);
}
// Truncate down to the new size.
@@ -16133,8 +16133,8 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
++OpsNarrowed;
return DAG
.getStore(St->getChain(), SDLoc(St), IVal, Ptr,
- St->getPointerInfo().getWithOffset(StOffset),
- St->getOriginalAlign());
+ St->getPointerInfo().getWithOffset(StOffset),
+ St->getOriginalAlign());
}
/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
@@ -16238,8 +16238,8 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
if (NewAlign < DAG.getDataLayout().getABITypeAlign(NewVTTy))
return SDValue();
- SDValue NewPtr =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(PtrOff), SDLoc(LD));
+ SDValue NewPtr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(PtrOff), SDLoc(LD));
SDValue NewLD =
DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
@@ -16547,9 +16547,9 @@ bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
// make sure we use trunc store if it's necessary to be legal.
SDValue NewStore;
if (!UseTrunc) {
- NewStore =
- DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
- FirstInChain->getPointerInfo(), FirstInChain->getAlign());
+ NewStore =
+ DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), FirstInChain->getAlign());
} else { // Must be realized as a trunc store
EVT LegalizedStoredValTy =
TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
@@ -16561,7 +16561,7 @@ bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
NewStore = DAG.getTruncStore(
NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
- FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
+ FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
}
// Replace all merged stores with the new store.
@@ -16576,18 +16576,18 @@ void DAGCombiner::getStoreMergeCandidates(
StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
SDNode *&RootNode) {
// This holds the base pointer, index, and the offset in bytes from the base
- // pointer. We must have a base and an offset. Do not handle stores to undef
- // base pointers.
+ // pointer. We must have a base and an offset. Do not handle stores to undef
+ // base pointers.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
- if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndef())
- return;
+ if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndef())
+ return;
SDValue Val = peekThroughBitcasts(St->getValue());
StoreSource StoreSrc = getStoreSource(Val);
assert(StoreSrc != StoreSource::Unknown && "Expected known source for store");
-
- // Match on loadbaseptr if relevant.
- EVT MemVT = St->getMemoryVT();
+
+ // Match on loadbaseptr if relevant.
+ EVT MemVT = St->getMemoryVT();
BaseIndexOffset LBasePtr;
EVT LoadVT;
if (StoreSrc == StoreSource::Load) {
@@ -16609,7 +16609,7 @@ void DAGCombiner::getStoreMergeCandidates(
int64_t &Offset) -> bool {
// The memory operands must not be volatile/indexed/atomic.
// TODO: May be able to relax for unordered atomics (see D66309)
- if (!Other->isSimple() || Other->isIndexed())
+ if (!Other->isSimple() || Other->isIndexed())
return false;
// Don't mix temporal stores with non-temporal stores.
if (St->isNonTemporal() != Other->isNonTemporal())
@@ -16618,38 +16618,38 @@ void DAGCombiner::getStoreMergeCandidates(
// Allow merging constants of different types as integers.
bool NoTypeMatch = (MemVT.isInteger()) ? !MemVT.bitsEq(Other->getMemoryVT())
: Other->getMemoryVT() != MemVT;
- switch (StoreSrc) {
- case StoreSource::Load: {
+ switch (StoreSrc) {
+ case StoreSource::Load: {
if (NoTypeMatch)
return false;
- // The Load's Base Ptr must also match.
- auto *OtherLd = dyn_cast<LoadSDNode>(OtherBC);
- if (!OtherLd)
- return false;
- BaseIndexOffset LPtr = BaseIndexOffset::match(OtherLd, DAG);
- if (LoadVT != OtherLd->getMemoryVT())
- return false;
- // Loads must only have one use.
- if (!OtherLd->hasNUsesOfValue(1, 0))
+ // The Load's Base Ptr must also match.
+ auto *OtherLd = dyn_cast<LoadSDNode>(OtherBC);
+ if (!OtherLd)
return false;
- // The memory operands must not be volatile/indexed/atomic.
- // TODO: May be able to relax for unordered atomics (see D66309)
- if (!OtherLd->isSimple() || OtherLd->isIndexed())
- return false;
- // Don't mix temporal loads with non-temporal loads.
- if (cast<LoadSDNode>(Val)->isNonTemporal() != OtherLd->isNonTemporal())
- return false;
- if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
- return false;
- break;
- }
- case StoreSource::Constant:
+ BaseIndexOffset LPtr = BaseIndexOffset::match(OtherLd, DAG);
+ if (LoadVT != OtherLd->getMemoryVT())
+ return false;
+ // Loads must only have one use.
+ if (!OtherLd->hasNUsesOfValue(1, 0))
+ return false;
+ // The memory operands must not be volatile/indexed/atomic.
+ // TODO: May be able to relax for unordered atomics (see D66309)
+ if (!OtherLd->isSimple() || OtherLd->isIndexed())
+ return false;
+ // Don't mix temporal loads with non-temporal loads.
+ if (cast<LoadSDNode>(Val)->isNonTemporal() != OtherLd->isNonTemporal())
+ return false;
+ if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
+ return false;
+ break;
+ }
+ case StoreSource::Constant:
if (NoTypeMatch)
return false;
if (!(isa<ConstantSDNode>(OtherBC) || isa<ConstantFPSDNode>(OtherBC)))
return false;
- break;
- case StoreSource::Extract:
+ break;
+ case StoreSource::Extract:
// Do not merge truncated stores here.
if (Other->isTruncatingStore())
return false;
@@ -16658,9 +16658,9 @@ void DAGCombiner::getStoreMergeCandidates(
if (OtherBC.getOpcode() != ISD::EXTRACT_VECTOR_ELT &&
OtherBC.getOpcode() != ISD::EXTRACT_SUBVECTOR)
return false;
- break;
- default:
- llvm_unreachable("Unhandled store source for merging");
+ break;
+ default:
+ llvm_unreachable("Unhandled store source for merging");
}
Ptr = BaseIndexOffset::match(Other, DAG);
return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
@@ -16671,24 +16671,24 @@ void DAGCombiner::getStoreMergeCandidates(
auto OverLimitInDependenceCheck = [&](SDNode *StoreNode,
SDNode *RootNode) -> bool {
auto RootCount = StoreRootCountMap.find(StoreNode);
- return RootCount != StoreRootCountMap.end() &&
- RootCount->second.first == RootNode &&
- RootCount->second.second > StoreMergeDependenceLimit;
- };
-
- auto TryToAddCandidate = [&](SDNode::use_iterator UseIter) {
- // This must be a chain use.
- if (UseIter.getOperandNo() != 0)
- return;
- if (auto *OtherStore = dyn_cast<StoreSDNode>(*UseIter)) {
- BaseIndexOffset Ptr;
- int64_t PtrDiff;
- if (CandidateMatch(OtherStore, Ptr, PtrDiff) &&
- !OverLimitInDependenceCheck(OtherStore, RootNode))
- StoreNodes.push_back(MemOpLink(OtherStore, PtrDiff));
- }
+ return RootCount != StoreRootCountMap.end() &&
+ RootCount->second.first == RootNode &&
+ RootCount->second.second > StoreMergeDependenceLimit;
};
+ auto TryToAddCandidate = [&](SDNode::use_iterator UseIter) {
+ // This must be a chain use.
+ if (UseIter.getOperandNo() != 0)
+ return;
+ if (auto *OtherStore = dyn_cast<StoreSDNode>(*UseIter)) {
+ BaseIndexOffset Ptr;
+ int64_t PtrDiff;
+ if (CandidateMatch(OtherStore, Ptr, PtrDiff) &&
+ !OverLimitInDependenceCheck(OtherStore, RootNode))
+ StoreNodes.push_back(MemOpLink(OtherStore, PtrDiff));
+ }
+ };
+
// We are looking for a root node which is an ancestor to all mergeable
// stores. We search up through a load, to our root and then down
// through all children. For instance we will find Store{1,2,3} if
@@ -16708,21 +16708,21 @@ void DAGCombiner::getStoreMergeCandidates(
RootNode = St->getChain().getNode();
unsigned NumNodesExplored = 0;
- const unsigned MaxSearchNodes = 1024;
- if (auto *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
+ const unsigned MaxSearchNodes = 1024;
+ if (auto *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
RootNode = Ldn->getChain().getNode();
for (auto I = RootNode->use_begin(), E = RootNode->use_end();
- I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored) {
- if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) { // walk down chain
+ I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored) {
+ if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) { // walk down chain
for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
- TryToAddCandidate(I2);
- }
- }
- } else {
+ TryToAddCandidate(I2);
+ }
+ }
+ } else {
for (auto I = RootNode->use_begin(), E = RootNode->use_end();
- I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored)
- TryToAddCandidate(I);
- }
+ I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored)
+ TryToAddCandidate(I);
+ }
}
// We need to check that merging these stores does not cause a loop in
@@ -17092,7 +17092,7 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
}
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
- Align FirstStoreAlign = FirstInChain->getAlign();
+ Align FirstStoreAlign = FirstInChain->getAlign();
LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
// Scan the memory operations on the chain and find the first
@@ -17187,7 +17187,7 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
// the NumElem refers to array/index size.
unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
NumElem = std::min(LastLegalType, NumElem);
- Align FirstLoadAlign = FirstLoad->getAlign();
+ Align FirstLoadAlign = FirstLoad->getAlign();
if (NumElem < 2) {
// We know that candidate stores are in order and of correct
@@ -17199,8 +17199,8 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
// can here.
unsigned NumSkip = 1;
while ((NumSkip < LoadNodes.size()) &&
- (LoadNodes[NumSkip].MemNode->getAlign() <= FirstLoadAlign) &&
- (StoreNodes[NumSkip].MemNode->getAlign() <= FirstStoreAlign))
+ (LoadNodes[NumSkip].MemNode->getAlign() <= FirstLoadAlign) &&
+ (StoreNodes[NumSkip].MemNode->getAlign() <= FirstStoreAlign))
NumSkip++;
StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumSkip);
@@ -17273,10 +17273,10 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
FirstLoad->getChain(), FirstLoad->getBasePtr(),
FirstLoad->getPointerInfo(), JointMemOpVT,
FirstLoadAlign, LdMMOFlags);
- NewStore = DAG.getTruncStore(
- NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
- FirstInChain->getPointerInfo(), JointMemOpVT,
- FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
+ NewStore = DAG.getTruncStore(
+ NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), JointMemOpVT,
+ FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
}
// Transfer chain users from old loads to the new load.
@@ -17482,11 +17482,11 @@ SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
AAMDNodes AAInfo = ST->getAAInfo();
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
- ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), DL);
+ ST->getOriginalAlign(), MMOFlags, AAInfo);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), DL);
SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
- ST->getOriginalAlign(), MMOFlags, AAInfo);
+ ST->getOriginalAlign(), MMOFlags, AAInfo);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
St0, St1);
}
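A host-side illustration of the split, assuming a little-endian target (Lo at offset 0, Hi at offset 4, matching the two stores above):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    int main() {
      double d = 1.0;                  // the FP constant being stored
      uint64_t bits;
      std::memcpy(&bits, &d, 8);
      uint32_t lo = uint32_t(bits), hi = uint32_t(bits >> 32);
      uint8_t mem[8];
      std::memcpy(mem + 0, &lo, 4);    // St0
      std::memcpy(mem + 4, &hi, 4);    // St1 at getWithOffset(4)
      double back;
      std::memcpy(&back, mem, 8);
      assert(back == d);               // round-trips on little-endian hosts
    }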
@@ -17547,7 +17547,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
return NewST;
// Try transforming several stores into STORE (BSWAP).
- if (SDValue Store = mergeTruncStores(ST))
+ if (SDValue Store = mergeTruncStores(ST))
return Store;
if (ST->isUnindexed()) {
@@ -17620,12 +17620,12 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
!ST1->getBasePtr().isUndef() &&
// BaseIndexOffset and the code below requires knowing the size
// of a vector, so bail out if MemoryVT is scalable.
- !ST->getMemoryVT().isScalableVector() &&
+ !ST->getMemoryVT().isScalableVector() &&
!ST1->getMemoryVT().isScalableVector()) {
const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
- unsigned STBitSize = ST->getMemoryVT().getFixedSizeInBits();
- unsigned ChainBitSize = ST1->getMemoryVT().getFixedSizeInBits();
+ unsigned STBitSize = ST->getMemoryVT().getFixedSizeInBits();
+ unsigned ChainBitSize = ST1->getMemoryVT().getFixedSizeInBits();
// If this is a store whose preceding store writes to a subset of the current
// location and no other node is chained to that store, we can
// effectively drop the store. Do not remove stores to undef as they may
@@ -17696,7 +17696,7 @@ SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {
// We walk up the chains to find stores.
SmallVector<SDValue, 8> Chains = {N->getOperand(0)};
while (!Chains.empty()) {
- SDValue Chain = Chains.pop_back_val();
+ SDValue Chain = Chains.pop_back_val();
if (!Chain.hasOneUse())
continue;
switch (Chain.getOpcode()) {
@@ -17716,16 +17716,16 @@ SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {
// TODO: Can relax for unordered atomics (see D66309)
if (!ST->isSimple() || ST->isIndexed())
continue;
- const TypeSize StoreSize = ST->getMemoryVT().getStoreSize();
- // The bounds of a scalable store are not known until runtime, so this
- // store cannot be elided.
- if (StoreSize.isScalable())
- continue;
+ const TypeSize StoreSize = ST->getMemoryVT().getStoreSize();
+ // The bounds of a scalable store are not known until runtime, so this
+ // store cannot be elided.
+ if (StoreSize.isScalable())
+ continue;
const BaseIndexOffset StoreBase = BaseIndexOffset::match(ST, DAG);
// If we store purely within object bounds just before its lifetime ends,
// we can remove the store.
if (LifetimeEndBase.contains(DAG, LifetimeEnd->getSize() * 8, StoreBase,
- StoreSize.getFixedSize() * 8)) {
+ StoreSize.getFixedSize() * 8)) {
LLVM_DEBUG(dbgs() << "\nRemoving store:"; StoreBase.dump();
dbgs() << "\nwithin LIFETIME_END of : ";
LifetimeEndBase.dump(); dbgs() << "\n");
@@ -17836,12 +17836,12 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
SDValue Ptr = ST->getBasePtr();
// Lower value store.
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
- ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(HalfValBitSize / 8), DL);
+ ST->getOriginalAlign(), MMOFlags, AAInfo);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(HalfValBitSize / 8), DL);
// Higher value store.
- SDValue St1 = DAG.getStore(
- St0, DL, Hi, Ptr, ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
- ST->getOriginalAlign(), MMOFlags, AAInfo);
+ SDValue St1 = DAG.getStore(
+ St0, DL, Hi, Ptr, ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
+ ST->getOriginalAlign(), MMOFlags, AAInfo);
return St1;
}
@@ -18079,13 +18079,13 @@ SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
EVT ResultVT = EVE->getValueType(0);
EVT VecEltVT = InVecVT.getVectorElementType();
-
- // If the vector element type is not a multiple of a byte then we are unable
- // to correctly compute an address to load only the extracted element as a
- // scalar.
- if (!VecEltVT.isByteSized())
- return SDValue();
-
+
+ // If the vector element type is not a multiple of a byte then we are unable
+ // to correctly compute an address to load only the extracted element as a
+ // scalar.
+ if (!VecEltVT.isByteSized())
+ return SDValue();
+
Align Alignment = OriginalLoad->getAlign();
Align NewAlign = DAG.getDataLayout().getABITypeAlign(
VecEltVT.getTypeForEVT(*DAG.getContext()));
@@ -18721,24 +18721,24 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
// operands will all be based off of VecIn1, even those in VecIn2.
unsigned Vec2Offset = DidSplitVec ? 0 : InVT1.getVectorNumElements();
- uint64_t VTSize = VT.getFixedSizeInBits();
- uint64_t InVT1Size = InVT1.getFixedSizeInBits();
- uint64_t InVT2Size = InVT2.getFixedSizeInBits();
-
+ uint64_t VTSize = VT.getFixedSizeInBits();
+ uint64_t InVT1Size = InVT1.getFixedSizeInBits();
+ uint64_t InVT2Size = InVT2.getFixedSizeInBits();
+
// We can't generate a shuffle node with mismatched input and output types.
// Try to make the types match the type of the output.
if (InVT1 != VT || InVT2 != VT) {
- if ((VTSize % InVT1Size == 0) && InVT1 == InVT2) {
+ if ((VTSize % InVT1Size == 0) && InVT1 == InVT2) {
// If the output vector length is a multiple of both input lengths,
// we can concatenate them and pad the rest with undefs.
- unsigned NumConcats = VTSize / InVT1Size;
+ unsigned NumConcats = VTSize / InVT1Size;
assert(NumConcats >= 2 && "Concat needs at least two inputs!");
SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
ConcatOps[0] = VecIn1;
ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
VecIn2 = SDValue();
- } else if (InVT1Size == VTSize * 2) {
+ } else if (InVT1Size == VTSize * 2) {
if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems))
return SDValue();
@@ -18751,7 +18751,7 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
// Since we now have shorter input vectors, adjust the offset of the
// second vector's start.
Vec2Offset = NumElems;
- } else if (InVT2Size <= InVT1Size) {
+ } else if (InVT2Size <= InVT1Size) {
// VecIn1 is wider than the output, and we have another, possibly
// smaller input. Pad the smaller input with undefs, shuffle at the
// input vector width, and extract the output.
@@ -18776,7 +18776,7 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
// when we start sorting the vectors by type.
return SDValue();
}
- } else if (InVT2Size * 2 == VTSize && InVT1Size == VTSize) {
+ } else if (InVT2Size * 2 == VTSize && InVT1Size == VTSize) {
SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
ConcatOps[0] = VecIn2;
VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
@@ -18967,7 +18967,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
// Have we seen this input vector before?
// The vectors are expected to be tiny (usually 1 or 2 elements), so using
// a map back from SDValues to numbers isn't worth it.
- unsigned Idx = std::distance(VecIn.begin(), find(VecIn, ExtractedFromVec));
+ unsigned Idx = std::distance(VecIn.begin(), find(VecIn, ExtractedFromVec));
if (Idx == VecIn.size())
VecIn.push_back(ExtractedFromVec);
@@ -19425,7 +19425,7 @@ static SDValue combineConcatVectorOfCasts(SDNode *N, SelectionDAG &DAG) {
// check the other type in the cast to make sure this is really legal.
EVT VT = N->getValueType(0);
EVT SrcEltVT = SrcVT.getVectorElementType();
- ElementCount NumElts = SrcVT.getVectorElementCount() * N->getNumOperands();
+ ElementCount NumElts = SrcVT.getVectorElementCount() * N->getNumOperands();
EVT ConcatSrcVT = EVT::getVectorVT(*DAG.getContext(), SrcEltVT, NumElts);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
switch (CastOpcode) {
@@ -19462,8 +19462,8 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
return DAG.getUNDEF(VT);
// Optimize concat_vectors where all but the first of the vectors are undef.
- if (all_of(drop_begin(N->ops()),
- [](const SDValue &Op) { return Op.isUndef(); })) {
+ if (all_of(drop_begin(N->ops()),
+ [](const SDValue &Op) { return Op.isUndef(); })) {
SDValue In = N->getOperand(0);
assert(In.getValueType().isVector() && "Must concat vectors");
@@ -19636,16 +19636,16 @@ static SDValue getSubVectorSrc(SDValue V, SDValue Index, EVT SubVT) {
auto *IndexC = dyn_cast<ConstantSDNode>(Index);
if (IndexC && V.getOpcode() == ISD::CONCAT_VECTORS &&
V.getOperand(0).getValueType() == SubVT &&
- (IndexC->getZExtValue() % SubVT.getVectorMinNumElements()) == 0) {
- uint64_t SubIdx = IndexC->getZExtValue() / SubVT.getVectorMinNumElements();
+ (IndexC->getZExtValue() % SubVT.getVectorMinNumElements()) == 0) {
+ uint64_t SubIdx = IndexC->getZExtValue() / SubVT.getVectorMinNumElements();
return V.getOperand(SubIdx);
}
return SDValue();
}
static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
- SelectionDAG &DAG,
- bool LegalOperations) {
+ SelectionDAG &DAG,
+ bool LegalOperations) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue BinOp = Extract->getOperand(0);
unsigned BinOpcode = BinOp.getOpcode();
@@ -19659,7 +19659,7 @@ static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
SDValue Index = Extract->getOperand(1);
EVT SubVT = Extract->getValueType(0);
- if (!TLI.isOperationLegalOrCustom(BinOpcode, SubVT, LegalOperations))
+ if (!TLI.isOperationLegalOrCustom(BinOpcode, SubVT, LegalOperations))
return SDValue();
SDValue Sub0 = getSubVectorSrc(Bop0, Index, SubVT);
@@ -19680,12 +19680,12 @@ static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
/// If we are extracting a subvector produced by a wide binary operator try
/// to use a narrow binary operator and/or avoid concatenation and extraction.
-static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG,
- bool LegalOperations) {
+static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG,
+ bool LegalOperations) {
// TODO: Refactor with the caller (visitEXTRACT_SUBVECTOR), so we can share
// some of these bailouts with other transforms.
- if (SDValue V = narrowInsertExtractVectorBinOp(Extract, DAG, LegalOperations))
+ if (SDValue V = narrowInsertExtractVectorBinOp(Extract, DAG, LegalOperations))
return V;
// The extract index must be a constant, so we can map it to a concat operand.
@@ -19830,16 +19830,16 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
return SDValue();
unsigned Index = ExtIdx->getZExtValue();
- unsigned NumElts = VT.getVectorMinNumElements();
+ unsigned NumElts = VT.getVectorMinNumElements();
- // The definition of EXTRACT_SUBVECTOR states that the index must be a
- // multiple of the minimum number of elements in the result type.
- assert(Index % NumElts == 0 && "The extract subvector index is not a "
- "multiple of the result's element count");
-
- // It's fine to use TypeSize here as we know the offset will not be negative.
- TypeSize Offset = VT.getStoreSize() * (Index / NumElts);
+ // The definition of EXTRACT_SUBVECTOR states that the index must be a
+ // multiple of the minimum number of elements in the result type.
+ assert(Index % NumElts == 0 && "The extract subvector index is not a "
+ "multiple of the result's element count");
+ // It's fine to use TypeSize here as we know the offset will not be negative.
+ TypeSize Offset = VT.getStoreSize() * (Index / NumElts);
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.shouldReduceLoadWidth(Ld, Ld->getExtensionType(), VT))
return SDValue();
@@ -19849,19 +19849,19 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
SDLoc DL(Extract);
// TODO: Use "BaseIndexOffset" to make this more effective.
- SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
-
- uint64_t StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
+ SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
+
+ uint64_t StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
MachineFunction &MF = DAG.getMachineFunction();
- MachineMemOperand *MMO;
- if (Offset.isScalable()) {
- MachinePointerInfo MPI =
- MachinePointerInfo(Ld->getPointerInfo().getAddrSpace());
- MMO = MF.getMachineMemOperand(Ld->getMemOperand(), MPI, StoreSize);
- } else
- MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset.getFixedSize(),
- StoreSize);
-
+ MachineMemOperand *MMO;
+ if (Offset.isScalable()) {
+ MachinePointerInfo MPI =
+ MachinePointerInfo(Ld->getPointerInfo().getAddrSpace());
+ MMO = MF.getMachineMemOperand(Ld->getMemOperand(), MPI, StoreSize);
+ } else
+ MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset.getFixedSize(),
+ StoreSize);
+
SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
return NewLd;
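The offset computation in narrowExtractedVectorLoad has a simple scalar model: the wide vector is a sequence of equally sized chunks, and subvector number Index / NumElts begins that many chunk-store-sizes into the load. A hedged sketch for the fixed-width case (names are illustrative):

#include <cassert>
#include <cstdint>

// Byte offset of the narrowed load: extracting subvector (Index / NumElts)
// of a loaded wide vector reads at that many subvector store sizes.
uint64_t narrowedLoadByteOffset(uint64_t Index, uint64_t NumElts,
                                uint64_t SubVecStoreBytes) {
  assert(Index % NumElts == 0 && "index must be a multiple of NumElts");
  return SubVecStoreBytes * (Index / NumElts);
}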
@@ -19914,9 +19914,9 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
}
if ((DestNumElts % SrcNumElts) == 0) {
unsigned DestSrcRatio = DestNumElts / SrcNumElts;
- if (NVT.getVectorElementCount().isKnownMultipleOf(DestSrcRatio)) {
- ElementCount NewExtEC =
- NVT.getVectorElementCount().divideCoefficientBy(DestSrcRatio);
+ if (NVT.getVectorElementCount().isKnownMultipleOf(DestSrcRatio)) {
+ ElementCount NewExtEC =
+ NVT.getVectorElementCount().divideCoefficientBy(DestSrcRatio);
EVT ScalarVT = SrcVT.getScalarType();
if ((ExtIdx % DestSrcRatio) == 0) {
SDLoc DL(N);
@@ -19930,7 +19930,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
V.getOperand(0), NewIndex);
return DAG.getBitcast(NVT, NewExtract);
}
- if (NewExtEC.isScalar() &&
+ if (NewExtEC.isScalar() &&
TLI.isOperationLegalOrCustom(ISD::EXTRACT_VECTOR_ELT, ScalarVT)) {
SDValue NewIndex = DAG.getVectorIdxConstant(IndexValScaled, DL);
SDValue NewExtract =
@@ -20035,7 +20035,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
N->getOperand(1));
}
- if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG, LegalOperations))
+ if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG, LegalOperations))
return NarrowBOp;
if (SimplifyDemandedVectorElts(SDValue(N, 0)))
@@ -20813,51 +20813,51 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
}
}
- if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
- // Canonicalize shuffles according to rules:
- // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
- // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
- // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
- if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
- N0.getOpcode() != ISD::VECTOR_SHUFFLE) {
- // The incoming shuffle must be of the same type as the result of the
- // current shuffle.
- assert(N1->getOperand(0).getValueType() == VT &&
- "Shuffle types don't match");
-
- SDValue SV0 = N1->getOperand(0);
- SDValue SV1 = N1->getOperand(1);
- bool HasSameOp0 = N0 == SV0;
- bool IsSV1Undef = SV1.isUndef();
- if (HasSameOp0 || IsSV1Undef || N0 == SV1)
- // Commute the operands of this shuffle so merging below will trigger.
- return DAG.getCommutedVectorShuffle(*SVN);
- }
-
- // Canonicalize splat shuffles to the RHS to improve merging below.
- // shuffle(splat(A,u), shuffle(C,D)) -> shuffle'(shuffle(C,D), splat(A,u))
- if (N0.getOpcode() == ISD::VECTOR_SHUFFLE &&
- N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
- cast<ShuffleVectorSDNode>(N0)->isSplat() &&
- !cast<ShuffleVectorSDNode>(N1)->isSplat()) {
+ if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
+ // Canonicalize shuffles according to rules:
+ // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
+ // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
+ // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
+ if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
+ N0.getOpcode() != ISD::VECTOR_SHUFFLE) {
+ // The incoming shuffle must be of the same type as the result of the
+ // current shuffle.
+ assert(N1->getOperand(0).getValueType() == VT &&
+ "Shuffle types don't match");
+
+ SDValue SV0 = N1->getOperand(0);
+ SDValue SV1 = N1->getOperand(1);
+ bool HasSameOp0 = N0 == SV0;
+ bool IsSV1Undef = SV1.isUndef();
+ if (HasSameOp0 || IsSV1Undef || N0 == SV1)
+ // Commute the operands of this shuffle so merging below will trigger.
+ return DAG.getCommutedVectorShuffle(*SVN);
+ }
+
+ // Canonicalize splat shuffles to the RHS to improve merging below.
+ // shuffle(splat(A,u), shuffle(C,D)) -> shuffle'(shuffle(C,D), splat(A,u))
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE &&
+ N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
+ cast<ShuffleVectorSDNode>(N0)->isSplat() &&
+ !cast<ShuffleVectorSDNode>(N1)->isSplat()) {
return DAG.getCommutedVectorShuffle(*SVN);
- }
+ }
}
- // Compute the combined shuffle mask for a shuffle with SV0 as the first
- // operand, and SV1 as the second operand.
- // i.e. Merge SVN(OtherSVN, N1) -> shuffle(SV0, SV1, Mask).
- auto MergeInnerShuffle = [NumElts](ShuffleVectorSDNode *SVN,
- ShuffleVectorSDNode *OtherSVN, SDValue N1,
- SDValue &SV0, SDValue &SV1,
- SmallVectorImpl<int> &Mask) -> bool {
+ // Compute the combined shuffle mask for a shuffle with SV0 as the first
+ // operand, and SV1 as the second operand.
+ // i.e. Merge SVN(OtherSVN, N1) -> shuffle(SV0, SV1, Mask).
+ auto MergeInnerShuffle = [NumElts](ShuffleVectorSDNode *SVN,
+ ShuffleVectorSDNode *OtherSVN, SDValue N1,
+ SDValue &SV0, SDValue &SV1,
+ SmallVectorImpl<int> &Mask) -> bool {
// Don't try to fold splats; they're likely to simplify somehow, or they
// might be free.
- if (OtherSVN->isSplat())
- return false;
+ if (OtherSVN->isSplat())
+ return false;
- SV0 = SV1 = SDValue();
- Mask.clear();
+ SV0 = SV1 = SDValue();
+ Mask.clear();
for (unsigned i = 0; i != NumElts; ++i) {
int Idx = SVN->getMaskElt(i);
@@ -20871,14 +20871,14 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
if (Idx < (int)NumElts) {
// This shuffle index refers to the inner shuffle N0. Lookup the inner
// shuffle mask to identify which vector is actually referenced.
- Idx = OtherSVN->getMaskElt(Idx);
+ Idx = OtherSVN->getMaskElt(Idx);
if (Idx < 0) {
// Propagate Undef.
Mask.push_back(Idx);
continue;
}
- CurrentVec = (Idx < (int)NumElts) ? OtherSVN->getOperand(0)
- : OtherSVN->getOperand(1);
+ CurrentVec = (Idx < (int)NumElts) ? OtherSVN->getOperand(0)
+ : OtherSVN->getOperand(1);
} else {
// This shuffle index references an element within N1.
CurrentVec = N1;
@@ -20900,82 +20900,82 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
Mask.push_back(Idx);
continue;
}
- if (!SV1.getNode() || SV1 == CurrentVec) {
- // Ok. CurrentVec is the right hand side.
- // Update the mask accordingly.
- SV1 = CurrentVec;
- Mask.push_back(Idx + NumElts);
- continue;
- }
-
- // Last chance - see if the vector is another shuffle and if it
- // uses one of the existing candidate shuffle ops.
- if (auto *CurrentSVN = dyn_cast<ShuffleVectorSDNode>(CurrentVec)) {
- int InnerIdx = CurrentSVN->getMaskElt(Idx);
- if (InnerIdx < 0) {
- Mask.push_back(-1);
- continue;
- }
- SDValue InnerVec = (InnerIdx < (int)NumElts)
- ? CurrentSVN->getOperand(0)
- : CurrentSVN->getOperand(1);
- if (InnerVec.isUndef()) {
- Mask.push_back(-1);
- continue;
- }
- InnerIdx %= NumElts;
- if (InnerVec == SV0) {
- Mask.push_back(InnerIdx);
- continue;
- }
- if (InnerVec == SV1) {
- Mask.push_back(InnerIdx + NumElts);
- continue;
- }
- }
-
+ if (!SV1.getNode() || SV1 == CurrentVec) {
+ // Ok. CurrentVec is the right hand side.
+ // Update the mask accordingly.
+ SV1 = CurrentVec;
+ Mask.push_back(Idx + NumElts);
+ continue;
+ }
+
+ // Last chance - see if the vector is another shuffle and if it
+ // uses one of the existing candidate shuffle ops.
+ if (auto *CurrentSVN = dyn_cast<ShuffleVectorSDNode>(CurrentVec)) {
+ int InnerIdx = CurrentSVN->getMaskElt(Idx);
+ if (InnerIdx < 0) {
+ Mask.push_back(-1);
+ continue;
+ }
+ SDValue InnerVec = (InnerIdx < (int)NumElts)
+ ? CurrentSVN->getOperand(0)
+ : CurrentSVN->getOperand(1);
+ if (InnerVec.isUndef()) {
+ Mask.push_back(-1);
+ continue;
+ }
+ InnerIdx %= NumElts;
+ if (InnerVec == SV0) {
+ Mask.push_back(InnerIdx);
+ continue;
+ }
+ if (InnerVec == SV1) {
+ Mask.push_back(InnerIdx + NumElts);
+ continue;
+ }
+ }
+
// Bail out if we cannot convert the shuffle pair into a single shuffle.
- return false;
- }
- return true;
- };
-
- // Try to fold according to rules:
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
- // Don't try to fold shuffles with illegal type.
- // Only fold if this shuffle is the only user of the other shuffle.
- if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
- Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
- ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
-
- // The incoming shuffle must be of the same type as the result of the
- // current shuffle.
- assert(OtherSV->getOperand(0).getValueType() == VT &&
- "Shuffle types don't match");
-
- SDValue SV0, SV1;
- SmallVector<int, 4> Mask;
- if (MergeInnerShuffle(SVN, OtherSV, N1, SV0, SV1, Mask)) {
- // Check if all indices in Mask are Undef. If so, propagate Undef.
- if (llvm::all_of(Mask, [](int M) { return M < 0; }))
- return DAG.getUNDEF(VT);
-
- if (!SV0.getNode())
- SV0 = DAG.getUNDEF(VT);
- if (!SV1.getNode())
- SV1 = DAG.getUNDEF(VT);
-
- // Avoid introducing shuffles with illegal mask.
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
- // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
- return TLI.buildLegalVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask, DAG);
- }
+ return false;
+ }
+ return true;
+ };
+
+ // Try to fold according to rules:
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
+ // Don't try to fold shuffles with illegal type.
+ // Only fold if this shuffle is the only user of the other shuffle.
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
+ Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
+ ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
+
+ // The incoming shuffle must be of the same type as the result of the
+ // current shuffle.
+ assert(OtherSV->getOperand(0).getValueType() == VT &&
+ "Shuffle types don't match");
+
+ SDValue SV0, SV1;
+ SmallVector<int, 4> Mask;
+ if (MergeInnerShuffle(SVN, OtherSV, N1, SV0, SV1, Mask)) {
+ // Check if all indices in Mask are Undef. If so, propagate Undef.
+ if (llvm::all_of(Mask, [](int M) { return M < 0; }))
+ return DAG.getUNDEF(VT);
+
+ if (!SV0.getNode())
+ SV0 = DAG.getUNDEF(VT);
+ if (!SV1.getNode())
+ SV1 = DAG.getUNDEF(VT);
+
+ // Avoid introducing shuffles with illegal mask.
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
+ // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
+ return TLI.buildLegalVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask, DAG);
+ }
}
if (SDValue V = foldShuffleOfConcatUndefs(SVN, DAG))
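The MergeInnerShuffle lambda above is, at heart, mask composition. A self-contained sketch of the simplest case it handles, where the inner shuffle uses only its first operand, so shuffle(shuffle(A, undef, M0), C, M1) becomes shuffle(A, C, M2); the in-tree code generalizes this by tracking which two source vectors (SV0/SV1) actually survive. Negative mask entries mean undef, as with ISD::VECTOR_SHUFFLE:

#include <cassert>
#include <vector>

std::vector<int> mergeShuffleMasks(const std::vector<int> &Outer,
                                   const std::vector<int> &Inner) {
  assert(Outer.size() == Inner.size() && "masks must cover the same width");
  const int N = static_cast<int>(Outer.size());
  std::vector<int> Merged;
  Merged.reserve(Outer.size());
  for (int Idx : Outer) {
    if (Idx < 0) {
      Merged.push_back(-1);          // undef propagates
    } else if (Idx < N) {
      int InnerIdx = Inner[Idx];     // look through the inner shuffle
      assert(InnerIdx < N && "inner shuffle must only use operand A");
      Merged.push_back(InnerIdx);    // element of A, or negative for undef
    } else {
      Merged.push_back(Idx);         // element of C; already offset by N
    }
  }
  return Merged;
}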
@@ -21060,8 +21060,8 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(0).getOperand(1) == N2 &&
- N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
- VT.getVectorElementCount() &&
+ N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
+ VT.getVectorElementCount() &&
N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
VT.getSizeInBits()) {
return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
@@ -21078,7 +21078,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
EVT CN1VT = CN1.getValueType();
if (CN0VT.isVector() && CN1VT.isVector() &&
CN0VT.getVectorElementType() == CN1VT.getVectorElementType() &&
- CN0VT.getVectorElementCount() == VT.getVectorElementCount()) {
+ CN0VT.getVectorElementCount() == VT.getVectorElementCount()) {
SDValue NewINSERT = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N),
CN0.getValueType(), CN0, CN1, N2);
return DAG.getBitcast(VT, NewINSERT);
@@ -21117,7 +21117,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
SDLoc DL(N);
SDValue NewIdx;
LLVMContext &Ctx = *DAG.getContext();
- ElementCount NumElts = VT.getVectorElementCount();
+ ElementCount NumElts = VT.getVectorElementCount();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
if ((EltSizeInBits % N1SrcSVT.getSizeInBits()) == 0) {
unsigned Scale = EltSizeInBits / N1SrcSVT.getSizeInBits();
@@ -21125,9 +21125,9 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
NewIdx = DAG.getVectorIdxConstant(InsIdx * Scale, DL);
} else if ((N1SrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
unsigned Scale = N1SrcSVT.getSizeInBits() / EltSizeInBits;
- if (NumElts.isKnownMultipleOf(Scale) && (InsIdx % Scale) == 0) {
- NewVT = EVT::getVectorVT(Ctx, N1SrcSVT,
- NumElts.divideCoefficientBy(Scale));
+ if (NumElts.isKnownMultipleOf(Scale) && (InsIdx % Scale) == 0) {
+ NewVT = EVT::getVectorVT(Ctx, N1SrcSVT,
+ NumElts.divideCoefficientBy(Scale));
NewIdx = DAG.getVectorIdxConstant(InsIdx / Scale, DL);
}
}
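The Scale logic above rescales an insert index across a bitcast that changes the element size. A standalone sketch of just that index math, assuming fixed-width types (the helper name is illustrative, not part of LLVM):

#include <optional>

// When a subvector insert is rewritten in terms of elements of a different
// size, the insert index is scaled by the size ratio, and must divide
// evenly when the new elements are wider.
std::optional<unsigned> rescaleInsertIndex(unsigned InsIdx,
                                           unsigned OldEltBits,
                                           unsigned NewEltBits) {
  if (NewEltBits % OldEltBits == 0) { // new elements are wider (or equal)
    unsigned Scale = NewEltBits / OldEltBits;
    if (InsIdx % Scale != 0)
      return std::nullopt;            // insert point not representable
    return InsIdx / Scale;
  }
  if (OldEltBits % NewEltBits == 0)   // new elements are narrower
    return InsIdx * (OldEltBits / NewEltBits);
  return std::nullopt;                // sizes are not multiples of each other
}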
@@ -21159,10 +21159,10 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
// If the input vector is a concatenation, and the insert replaces
// one of the pieces, we can optimize into a single concat_vectors.
if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() &&
- N0.getOperand(0).getValueType() == N1.getValueType() &&
- N0.getOperand(0).getValueType().isScalableVector() ==
- N1.getValueType().isScalableVector()) {
- unsigned Factor = N1.getValueType().getVectorMinNumElements();
+ N0.getOperand(0).getValueType() == N1.getValueType() &&
+ N0.getOperand(0).getValueType().isScalableVector() ==
+ N1.getValueType().isScalableVector()) {
+ unsigned Factor = N1.getValueType().getVectorMinNumElements();
SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end());
Ops[InsIdx / Factor] = N1;
return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
@@ -21189,7 +21189,7 @@ SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
// fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
- if (!TLI.shouldKeepZExtForFP16Conv() && N0->getOpcode() == ISD::AND) {
+ if (!TLI.shouldKeepZExtForFP16Conv() && N0->getOpcode() == ISD::AND) {
ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
if (AndConst && AndConst->getAPIntValue() == 0xffff) {
return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
@@ -21206,7 +21206,7 @@ SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
unsigned Opcode = N->getOpcode();
// VECREDUCE over 1-element vector is just an extract.
- if (VT.getVectorElementCount().isScalar()) {
+ if (VT.getVectorElementCount().isScalar()) {
SDLoc dl(N);
SDValue Res =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getVectorElementType(), N0,
@@ -21445,8 +21445,8 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
SDValue Z = LHS.getOperand(2);
EVT NarrowVT = X.getValueType();
if (NarrowVT == Y.getValueType() &&
- TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT,
- LegalOperations)) {
+ TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT,
+ LegalOperations)) {
// (binop undef, undef) may not return undef, so compute that result.
SDLoc DL(N);
SDValue VecC =
@@ -21459,10 +21459,10 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
// Make sure all but the first op are undef or constant.
auto ConcatWithConstantOrUndef = [](SDValue Concat) {
return Concat.getOpcode() == ISD::CONCAT_VECTORS &&
- all_of(drop_begin(Concat->ops()), [](const SDValue &Op) {
- return Op.isUndef() ||
- ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
- });
+ all_of(drop_begin(Concat->ops()), [](const SDValue &Op) {
+ return Op.isUndef() ||
+ ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
+ });
};
// The following pattern is likely to emerge with vector reduction ops. Moving
@@ -21684,7 +21684,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
// It is safe to replace the two loads if they have different alignments,
// but the new load must be the minimum (most restrictive) alignment of the
// inputs.
- Align Alignment = std::min(LLD->getAlign(), RLD->getAlign());
+ Align Alignment = std::min(LLD->getAlign(), RLD->getAlign());
MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
if (!RLD->isInvariant())
MMOFlags &= ~MachineMemOperand::MOInvariant;
@@ -21790,46 +21790,46 @@ SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
}
-// Transform (fneg/fabs (bitconvert x)) to avoid loading constant pool values.
-SDValue DAGCombiner::foldSignChangeInBitcast(SDNode *N) {
- SDValue N0 = N->getOperand(0);
- EVT VT = N->getValueType(0);
- bool IsFabs = N->getOpcode() == ISD::FABS;
- bool IsFree = IsFabs ? TLI.isFAbsFree(VT) : TLI.isFNegFree(VT);
-
- if (IsFree || N0.getOpcode() != ISD::BITCAST || !N0.hasOneUse())
- return SDValue();
-
- SDValue Int = N0.getOperand(0);
- EVT IntVT = Int.getValueType();
-
- // The operand to cast should be integer.
- if (!IntVT.isInteger() || IntVT.isVector())
- return SDValue();
-
- // (fneg (bitconvert x)) -> (bitconvert (xor x sign))
- // (fabs (bitconvert x)) -> (bitconvert (and x ~sign))
- APInt SignMask;
- if (N0.getValueType().isVector()) {
- // For vector, create a sign mask (0x80...) or its inverse (for fabs,
- // 0x7f...) per element and splat it.
- SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
- if (IsFabs)
- SignMask = ~SignMask;
- SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
- } else {
- // For scalar, just use the sign mask (0x80... or the inverse, 0x7f...)
- SignMask = APInt::getSignMask(IntVT.getSizeInBits());
- if (IsFabs)
- SignMask = ~SignMask;
- }
- SDLoc DL(N0);
- Int = DAG.getNode(IsFabs ? ISD::AND : ISD::XOR, DL, IntVT, Int,
- DAG.getConstant(SignMask, DL, IntVT));
- AddToWorklist(Int.getNode());
- return DAG.getBitcast(VT, Int);
-}
-
+// Transform (fneg/fabs (bitconvert x)) to avoid loading constant pool values.
+SDValue DAGCombiner::foldSignChangeInBitcast(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ bool IsFabs = N->getOpcode() == ISD::FABS;
+ bool IsFree = IsFabs ? TLI.isFAbsFree(VT) : TLI.isFNegFree(VT);
+
+ if (IsFree || N0.getOpcode() != ISD::BITCAST || !N0.hasOneUse())
+ return SDValue();
+
+ SDValue Int = N0.getOperand(0);
+ EVT IntVT = Int.getValueType();
+
+ // The operand to cast should be integer.
+ if (!IntVT.isInteger() || IntVT.isVector())
+ return SDValue();
+
+ // (fneg (bitconvert x)) -> (bitconvert (xor x sign))
+ // (fabs (bitconvert x)) -> (bitconvert (and x ~sign))
+ APInt SignMask;
+ if (N0.getValueType().isVector()) {
+ // For vector, create a sign mask (0x80...) or its inverse (for fabs,
+ // 0x7f...) per element and splat it.
+ SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
+ if (IsFabs)
+ SignMask = ~SignMask;
+ SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
+ } else {
+ // For scalar, just use the sign mask (0x80... or the inverse, 0x7f...)
+ SignMask = APInt::getSignMask(IntVT.getSizeInBits());
+ if (IsFabs)
+ SignMask = ~SignMask;
+ }
+ SDLoc DL(N0);
+ Int = DAG.getNode(IsFabs ? ISD::AND : ISD::XOR, DL, IntVT, Int,
+ DAG.getConstant(SignMask, DL, IntVT));
+ AddToWorklist(Int.getNode());
+ return DAG.getBitcast(VT, Int);
+}
+
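foldSignChangeInBitcast replaces an FP negate/abs of a bitcast value with integer bit operations, avoiding a constant-pool load of a sign mask. A scalar f32 illustration of the same identity, using memcpy for the bitconverts; this is a model of the idea, not the DAG code itself:

#include <cstdint>
#include <cstring>

static_assert(sizeof(float) == sizeof(uint32_t), "assumes 32-bit float");

// fneg flips the IEEE-754 sign bit with XOR; fabs clears it with AND.
float fnegViaBits(float X) {
  uint32_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits)); // bitconvert f32 -> i32
  Bits ^= 0x80000000u;                  // xor with the sign mask
  std::memcpy(&X, &Bits, sizeof(Bits)); // bitconvert back
  return X;
}

float fabsViaBits(float X) {
  uint32_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  Bits &= 0x7fffffffu;                  // and with ~sign mask
  std::memcpy(&X, &Bits, sizeof(Bits));
  return X;
}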
/// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
/// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
/// in it. This may be a win when the constant is not otherwise available
@@ -22112,7 +22112,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
EVT VT = V.getValueType();
SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
- SDValue Base = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
+ SDValue Base = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
return LogBase2;
}
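BuildLogBase2 relies on the identity log2(V) == (BitWidth - 1) - ctlz(V) for a power-of-two V. The same identity on a concrete 32-bit value, as a C++20 sketch using <bit> (the in-tree code emits ISD::CTLZ and ISD::SUB nodes instead):

#include <bit>
#include <cassert>
#include <cstdint>

unsigned logBase2(uint32_t V) {
  assert(std::has_single_bit(V) && "expected a power of two");
  return 31u - static_cast<unsigned>(std::countl_zero(V));
}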
@@ -22290,21 +22290,21 @@ SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
Reciprocal)) {
AddToWorklist(Est.getNode());
- if (Iterations)
+ if (Iterations)
Est = UseOneConstNR
? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
: buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);
- if (!Reciprocal) {
- SDLoc DL(Op);
- // Try the target-specific test first.
- SDValue Test = TLI.getSqrtInputTest(Op, DAG, DAG.getDenormalMode(VT));
-
- // The estimate is now completely wrong if the input was exactly 0.0 or
- // possibly a denormal. Force the answer to 0.0, or to the value provided
- // by the target, for those cases.
- Est = DAG.getNode(
- Test.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
- Test, TLI.getSqrtResultForDenormInput(Op, DAG), Est);
+ if (!Reciprocal) {
+ SDLoc DL(Op);
+ // Try the target-specific test first.
+ SDValue Test = TLI.getSqrtInputTest(Op, DAG, DAG.getDenormalMode(VT));
+
+ // The estimate is now completely wrong if the input was exactly 0.0 or
+ // possibly a denormal. Force the answer to 0.0, or to the value provided
+ // by the target, for those cases.
+ Est = DAG.getNode(
+ Test.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
+ Test, TLI.getSqrtResultForDenormInput(Op, DAG), Est);
}
return Est;
}
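The refinement loop above drives Newton-Raphson iterations on a reciprocal-square-root estimate and then patches up the 0.0/denormal case, where any finite estimate is meaningless. A scalar model with one iteration, assuming an initial 1/sqrt estimate is supplied; the names and the plain X == 0.0f test stand in for the target hooks:

#include <cmath>

float sqrtViaEstimate(float X, float Est /* initial 1/sqrt(X) estimate */) {
  // One NR step for f(e) = 1/e^2 - X:  e' = e * (1.5 - 0.5 * X * e * e)
  Est = Est * (1.5f - 0.5f * X * Est * Est);
  // sqrt(X) = X * (1/sqrt(X)); select 0.0 for a 0.0 (or denormal) input,
  // mirroring the VSELECT/SELECT emitted above.
  float Sqrt = X * Est;
  return (X == 0.0f) ? 0.0f : Sqrt;
}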
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FastISel.cpp
index 0ff77d4ba1..94d2762e52 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -158,77 +158,77 @@ bool FastISel::lowerArguments() {
/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
-static Register findLocalRegDef(MachineInstr &MI) {
+static Register findLocalRegDef(MachineInstr &MI) {
Register RegDef;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (MO.isDef()) {
if (RegDef)
- return Register();
+ return Register();
RegDef = MO.getReg();
} else if (MO.getReg().isVirtual()) {
- // This is another use of a vreg. Don't delete it.
+ // This is another use of a vreg. Don't delete it.
return Register();
}
}
return RegDef;
}
-static bool isRegUsedByPhiNodes(Register DefReg,
- FunctionLoweringInfo &FuncInfo) {
- for (auto &P : FuncInfo.PHINodesToUpdate)
- if (P.second == DefReg)
- return true;
- return false;
-}
-
+static bool isRegUsedByPhiNodes(Register DefReg,
+ FunctionLoweringInfo &FuncInfo) {
+ for (auto &P : FuncInfo.PHINodesToUpdate)
+ if (P.second == DefReg)
+ return true;
+ return false;
+}
+
void FastISel::flushLocalValueMap() {
- // If FastISel bails out, it could leave local value instructions behind
- // that aren't used for anything. Detect and erase those.
- if (LastLocalValue != EmitStartPt) {
- // Save the first instruction after local values, for later.
- MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
- ++FirstNonValue;
-
+ // If FastISel bails out, it could leave local value instructions behind
+ // that aren't used for anything. Detect and erase those.
+ if (LastLocalValue != EmitStartPt) {
+ // Save the first instruction after local values, for later.
+ MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
+ ++FirstNonValue;
+
MachineBasicBlock::reverse_iterator RE =
EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
: FuncInfo.MBB->rend();
MachineBasicBlock::reverse_iterator RI(LastLocalValue);
for (; RI != RE;) {
MachineInstr &LocalMI = *RI;
- // Increment before erasing what it points to.
+ // Increment before erasing what it points to.
++RI;
- Register DefReg = findLocalRegDef(LocalMI);
- if (!DefReg)
+ Register DefReg = findLocalRegDef(LocalMI);
+ if (!DefReg)
continue;
- if (FuncInfo.RegsWithFixups.count(DefReg))
+ if (FuncInfo.RegsWithFixups.count(DefReg))
continue;
- bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
- if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
- if (EmitStartPt == &LocalMI)
- EmitStartPt = EmitStartPt->getPrevNode();
- LLVM_DEBUG(dbgs() << "removing dead local value materialization"
- << LocalMI);
- LocalMI.eraseFromParent();
- }
- }
-
- if (FirstNonValue != FuncInfo.MBB->end()) {
- // See if there are any local value instructions left. If so, we want to
- // make sure the first one has a debug location; if it doesn't, use the
- // first non-value instruction's debug location.
-
- // If EmitStartPt is non-null, this block had copies at the top before
- // FastISel started doing anything; it points to the last one, so the
- // first local value instruction is the one after EmitStartPt.
- // If EmitStartPt is null, the first local value instruction is at the
- // top of the block.
- MachineBasicBlock::iterator FirstLocalValue =
- EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
- : FuncInfo.MBB->begin();
- if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
- FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
+ bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
+ if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
+ if (EmitStartPt == &LocalMI)
+ EmitStartPt = EmitStartPt->getPrevNode();
+ LLVM_DEBUG(dbgs() << "removing dead local value materialization"
+ << LocalMI);
+ LocalMI.eraseFromParent();
+ }
+ }
+
+ if (FirstNonValue != FuncInfo.MBB->end()) {
+ // See if there are any local value instructions left. If so, we want to
+ // make sure the first one has a debug location; if it doesn't, use the
+ // first non-value instruction's debug location.
+
+ // If EmitStartPt is non-null, this block had copies at the top before
+ // FastISel started doing anything; it points to the last one, so the
+ // first local value instruction is the one after EmitStartPt.
+ // If EmitStartPt is null, the first local value instruction is at the
+ // top of the block.
+ MachineBasicBlock::iterator FirstLocalValue =
+ EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
+ : FuncInfo.MBB->begin();
+ if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
+ FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
}
}
@@ -261,13 +261,13 @@ bool FastISel::hasTrivialKill(const Value *V) {
if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
return false;
- // Casts and extractvalues may be trivially coalesced by fast-isel.
- if (I->getOpcode() == Instruction::BitCast ||
- I->getOpcode() == Instruction::PtrToInt ||
- I->getOpcode() == Instruction::IntToPtr ||
- I->getOpcode() == Instruction::ExtractValue)
- return false;
-
+ // Casts and extractvalues may be trivially coalesced by fast-isel.
+ if (I->getOpcode() == Instruction::BitCast ||
+ I->getOpcode() == Instruction::PtrToInt ||
+ I->getOpcode() == Instruction::IntToPtr ||
+ I->getOpcode() == Instruction::ExtractValue)
+ return false;
+
// Only instructions with a single use in the same basic block are considered
// to have trivial kills.
return I->hasOneUse() &&
@@ -347,7 +347,7 @@ Register FastISel::materializeConstant(const Value *V, MVT VT) {
getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
if (IntegerReg)
Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
- /*Op0IsKill=*/false);
+ /*Op0IsKill=*/false);
}
}
} else if (const auto *Op = dyn_cast<Operator>(V)) {
@@ -477,9 +477,9 @@ void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
- SavePoint OldInsertPt = FuncInfo.InsertPt;
+ SavePoint OldInsertPt = FuncInfo.InsertPt;
recomputeInsertPt();
- return OldInsertPt;
+ return OldInsertPt;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
@@ -487,7 +487,7 @@ void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
LastLocalValue = &*std::prev(FuncInfo.InsertPt);
// Restore the previous insert position.
- FuncInfo.InsertPt = OldInsertPt;
+ FuncInfo.InsertPt = OldInsertPt;
}
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
@@ -1256,8 +1256,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
case Intrinsic::sideeffect:
// Neither does the assume intrinsic; it's also OK not to codegen its operand.
case Intrinsic::assume:
- // Neither does the llvm.experimental.noalias.scope.decl intrinsic
- case Intrinsic::experimental_noalias_scope_decl:
+ // Neither does the llvm.experimental.noalias.scope.decl intrinsic
+ case Intrinsic::experimental_noalias_scope_decl:
return true;
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
@@ -1526,11 +1526,11 @@ void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
}
bool FastISel::selectInstruction(const Instruction *I) {
- // Flush the local value map before starting each instruction.
- // This improves locality and debugging, and can reduce spills.
- // Reuse of values across IR instructions is relatively uncommon.
- flushLocalValueMap();
-
+ // Flush the local value map before starting each instruction.
+ // This improves locality and debugging, and can reduce spills.
+ // Reuse of values across IR instructions is relatively uncommon.
+ flushLocalValueMap();
+
MachineInstr *SavedLastLocalValue = getLastLocalValue();
// Just before the terminator instruction, insert instructions to
// feed PHI nodes in successor blocks.
@@ -1677,13 +1677,13 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
return false;
Register IntResultReg = fastEmit_ri_(
- IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true,
+ IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true,
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
if (!IntResultReg)
return false;
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
- IntResultReg, /*Op0IsKill=*/true);
+ IntResultReg, /*Op0IsKill=*/true);
if (!ResultReg)
return false;
@@ -1739,7 +1739,7 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
return selectBinaryOp(I, ISD::FADD);
case Instruction::Sub:
return selectBinaryOp(I, ISD::SUB);
- case Instruction::FSub:
+ case Instruction::FSub:
return selectBinaryOp(I, ISD::FSUB);
case Instruction::Mul:
return selectBinaryOp(I, ISD::MUL);
@@ -2236,9 +2236,9 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
- // Set the DebugLoc for the copy. Use the location of the operand if
- // there is one; otherwise no location, flushLocalValueMap will fix it.
- DbgLoc = DebugLoc();
+ // Set the DebugLoc for the copy. Use the location of the operand if
+ // there is one; otherwise no location, flushLocalValueMap will fix it.
+ DbgLoc = DebugLoc();
if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
DbgLoc = Inst->getDebugLoc();
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 32a4f60df0..e2412b6892 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -197,7 +197,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Look for inline asm that clobbers the SP register.
if (auto *Call = dyn_cast<CallBase>(&I)) {
if (Call->isInlineAsm()) {
- Register SP = TLI->getStackPointerRegisterToSaveRestore();
+ Register SP = TLI->getStackPointerRegisterToSaveRestore();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
std::vector<TargetLowering::AsmOperandInfo> Ops =
TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI,
@@ -360,7 +360,7 @@ void FunctionLoweringInfo::clear() {
RegFixups.clear();
RegsWithFixups.clear();
StatepointStackSlots.clear();
- StatepointRelocationMaps.clear();
+ StatepointRelocationMaps.clear();
PreferredExtendType.clear();
}
@@ -458,7 +458,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val = CI->getValue().zextOrTrunc(BitWidth);
DestLOI.NumSignBits = Val.getNumSignBits();
- DestLOI.Known = KnownBits::makeConstant(Val);
+ DestLOI.Known = KnownBits::makeConstant(Val);
} else {
assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
"CopyToReg node was created.");
@@ -508,7 +508,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
return;
}
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
- DestLOI.Known = KnownBits::commonBits(DestLOI.Known, SrcLOI->Known);
+ DestLOI.Known = KnownBits::commonBits(DestLOI.Known, SrcLOI->Known);
}
}
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index a5978711b8..298f7cee42 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -26,7 +26,7 @@
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
-#include "llvm/IR/PseudoProbe.h"
+#include "llvm/IR/PseudoProbe.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -201,8 +201,8 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
II.isVariadic() && II.variadicOpsAreDefs();
unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
- if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
- NumVRegs = NumResults;
+ if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
+ NumVRegs = NumResults;
for (unsigned i = 0; i < NumVRegs; ++i) {
// If the specific node value is only used by a CopyToReg and the dest reg
// is a vreg in the same register class, use the CopyToReg'd destination
@@ -696,11 +696,11 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
return &*MIB;
}
- // Attempt to produce a DBG_INSTR_REF if we've been asked to.
- if (EmitDebugInstrRefs)
- if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
- return InstrRef;
-
+ // Attempt to produce a DBG_INSTR_REF if we've been asked to.
+ if (EmitDebugInstrRefs)
+ if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
+ return InstrRef;
+
if (SD->getKind() == SDDbgValue::FRAMEIX) {
// Stack address; this needs to be lowered in target-dependent fashion.
// EmitTargetCodeForFrameDebugValue is responsible for allocation.
@@ -768,63 +768,63 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
}
MachineInstr *
-InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
- DenseMap<SDValue, Register> &VRBaseMap) {
- // Instruction referencing is still in a prototype state: for now we're only
- // going to support SDNodes within a block. Copies are not supported, they
- // don't actually define a value.
- if (SD->getKind() != SDDbgValue::SDNODE)
- return nullptr;
-
- SDNode *Node = SD->getSDNode();
- SDValue Op = SDValue(Node, SD->getResNo());
- DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
- if (I==VRBaseMap.end())
- return nullptr; // undef value: let EmitDbgValue produce a DBG_VALUE $noreg.
-
- MDNode *Var = SD->getVariable();
- MDNode *Expr = SD->getExpression();
- DebugLoc DL = SD->getDebugLoc();
-
- // Try to pick out a defining instruction at this point.
- unsigned VReg = getVR(Op, VRBaseMap);
- MachineInstr *ResultInstr = nullptr;
-
- // No definition corresponds to scenarios where a vreg is live-in to a block,
- // and doesn't have a defining instruction (yet). This can be patched up
- // later; at this early stage of implementation, fall back to using DBG_VALUE.
- if (!MRI->hasOneDef(VReg))
- return nullptr;
-
- MachineInstr &DefMI = *MRI->def_instr_begin(VReg);
- // Some target specific opcodes can become copies. As stated above, we're
- // ignoring those for now.
- if (DefMI.isCopy() || DefMI.getOpcode() == TargetOpcode::SUBREG_TO_REG)
- return nullptr;
-
- const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
- auto MIB = BuildMI(*MF, DL, RefII);
-
- // Find the operand which defines the specified VReg.
- unsigned OperandIdx = 0;
- for (const auto &MO : DefMI.operands()) {
- if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
- break;
- ++OperandIdx;
- }
- assert(OperandIdx < DefMI.getNumOperands());
-
- // Make the DBG_INSTR_REF refer to that instruction, and that operand.
- unsigned InstrNum = DefMI.getDebugInstrNum();
- MIB.addImm(InstrNum);
- MIB.addImm(OperandIdx);
- MIB.addMetadata(Var);
- MIB.addMetadata(Expr);
- ResultInstr = &*MIB;
- return ResultInstr;
-}
-
-MachineInstr *
+InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
+ DenseMap<SDValue, Register> &VRBaseMap) {
+ // Instruction referencing is still in a prototype state: for now we're only
+ // going to support SDNodes within a block. Copies are not supported, they
+ // don't actually define a value.
+ if (SD->getKind() != SDDbgValue::SDNODE)
+ return nullptr;
+
+ SDNode *Node = SD->getSDNode();
+ SDValue Op = SDValue(Node, SD->getResNo());
+ DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
+ if (I==VRBaseMap.end())
+ return nullptr; // undef value: let EmitDbgValue produce a DBG_VALUE $noreg.
+
+ MDNode *Var = SD->getVariable();
+ MDNode *Expr = SD->getExpression();
+ DebugLoc DL = SD->getDebugLoc();
+
+ // Try to pick out a defining instruction at this point.
+ unsigned VReg = getVR(Op, VRBaseMap);
+ MachineInstr *ResultInstr = nullptr;
+
+ // No definition corresponds to scenarios where a vreg is live-in to a block,
+ // and doesn't have a defining instruction (yet). This can be patched up
+ // later; at this early stage of implementation, fall back to using DBG_VALUE.
+ if (!MRI->hasOneDef(VReg))
+ return nullptr;
+
+ MachineInstr &DefMI = *MRI->def_instr_begin(VReg);
+ // Some target specific opcodes can become copies. As stated above, we're
+ // ignoring those for now.
+ if (DefMI.isCopy() || DefMI.getOpcode() == TargetOpcode::SUBREG_TO_REG)
+ return nullptr;
+
+ const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
+ auto MIB = BuildMI(*MF, DL, RefII);
+
+ // Find the operand which defines the specified VReg.
+ unsigned OperandIdx = 0;
+ for (const auto &MO : DefMI.operands()) {
+ if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
+ break;
+ ++OperandIdx;
+ }
+ assert(OperandIdx < DefMI.getNumOperands());
+
+ // Make the DBG_INSTR_REF refer to that instruction, and that operand.
+ unsigned InstrNum = DefMI.getDebugInstrNum();
+ MIB.addImm(InstrNum);
+ MIB.addImm(OperandIdx);
+ MIB.addMetadata(Var);
+ MIB.addMetadata(Expr);
+ ResultInstr = &*MIB;
+ return ResultInstr;
+}
+
+MachineInstr *
InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
MDNode *Label = SD->getLabel();
DebugLoc DL = SD->getDebugLoc();
@@ -886,8 +886,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
NumDefs = NumResults;
}
ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
- } else if (Opc == TargetOpcode::STATEPOINT) {
- NumDefs = NumResults;
+ } else if (Opc == TargetOpcode::STATEPOINT) {
+ NumDefs = NumResults;
}
unsigned NumImpUses = 0;
@@ -1037,22 +1037,22 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
- // STATEPOINT is too 'dynamic' to have meaningful machine description.
- // We have to manually tie operands.
- if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
- assert(!HasPhysRegOuts && "STATEPOINT mishandled");
- MachineInstr *MI = MIB;
- unsigned Def = 0;
- int First = StatepointOpers(MI).getFirstGCPtrIdx();
- assert(First > 0 && "Statepoint has Defs but no GC ptr list");
- unsigned Use = (unsigned)First;
- while (Def < NumDefs) {
- if (MI->getOperand(Use).isReg())
- MI->tieOperands(Def++, Use);
- Use = StackMaps::getNextMetaArgIdx(MI, Use);
- }
- }
-
+ // STATEPOINT is too 'dynamic' to have meaningful machine description.
+ // We have to manually tie operands.
+ if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
+ assert(!HasPhysRegOuts && "STATEPOINT mishandled");
+ MachineInstr *MI = MIB;
+ unsigned Def = 0;
+ int First = StatepointOpers(MI).getFirstGCPtrIdx();
+ assert(First > 0 && "Statepoint has Defs but no GC ptr list");
+ unsigned Use = (unsigned)First;
+ while (Def < NumDefs) {
+ if (MI->getOperand(Use).isReg())
+ MI->tieOperands(Def++, Use);
+ Use = StackMaps::getNextMetaArgIdx(MI, Use);
+ }
+ }
+
// Run post-isel target hook to adjust this instruction if needed.
if (II.hasPostISelHook())
TLI->AdjustInstrPostInstrSelection(*MIB, Node);
@@ -1125,20 +1125,20 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
break;
}
- case ISD::PSEUDO_PROBE: {
- unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
- auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
- auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
- auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
-
- BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
- .addImm(Guid)
- .addImm(Index)
- .addImm((uint8_t)PseudoProbeType::Block)
- .addImm(Attr);
- break;
- }
-
+ case ISD::PSEUDO_PROBE: {
+ unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
+ auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
+ auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
+ auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
+
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
+ .addImm(Guid)
+ .addImm(Index)
+ .addImm((uint8_t)PseudoProbeType::Block)
+ .addImm(Attr);
+ break;
+ }
+
case ISD::INLINEASM:
case ISD::INLINEASM_BR: {
unsigned NumOps = Node->getNumOperands();
@@ -1254,12 +1254,12 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
-InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
+InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
MachineBasicBlock::iterator insertpos)
: MF(mbb->getParent()), MRI(&MF->getRegInfo()),
TII(MF->getSubtarget().getInstrInfo()),
TRI(MF->getSubtarget().getRegisterInfo()),
TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
- InsertPos(insertpos) {
- EmitDebugInstrRefs = TM.Options.ValueTrackingVariableLocations;
-}
+ InsertPos(insertpos) {
+ EmitDebugInstrRefs = TM.Options.ValueTrackingVariableLocations;
+}
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.h b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.h
index 09658b8143..78d19cde4c 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -26,7 +26,7 @@ class MCInstrDesc;
class SDDbgLabel;
class SDDbgValue;
class TargetLowering;
-class TargetMachine;
+class TargetMachine;
class LLVM_LIBRARY_VISIBILITY InstrEmitter {
MachineFunction *MF;
@@ -38,9 +38,9 @@ class LLVM_LIBRARY_VISIBILITY InstrEmitter {
MachineBasicBlock *MBB;
MachineBasicBlock::iterator InsertPos;
- /// Should we try to produce DBG_INSTR_REF instructions?
- bool EmitDebugInstrRefs;
-
+ /// Should we try to produce DBG_INSTR_REF instructions?
+ bool EmitDebugInstrRefs;
+
/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
/// implicit physical register output.
void EmitCopyFromReg(SDNode *Node, unsigned ResNo,
@@ -113,11 +113,11 @@ public:
MachineInstr *EmitDbgValue(SDDbgValue *SD,
DenseMap<SDValue, Register> &VRBaseMap);
- /// Attempt to emit a dbg_value as a DBG_INSTR_REF. May fail and return
- /// nullptr, in which case we fall back to plain EmitDbgValue.
- MachineInstr *EmitDbgInstrRef(SDDbgValue *SD,
- DenseMap<SDValue, Register> &VRBaseMap);
-
+ /// Attempt to emit a dbg_value as a DBG_INSTR_REF. May fail and return
+ /// nullptr, in which case we fall back to plain EmitDbgValue.
+ MachineInstr *EmitDbgInstrRef(SDDbgValue *SD,
+ DenseMap<SDValue, Register> &VRBaseMap);
+
/// Generate machine instruction for a dbg_label node.
MachineInstr *EmitDbgLabel(SDDbgLabel *SD);
@@ -139,8 +139,8 @@ public:
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
- InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
- MachineBasicBlock::iterator insertpos);
+ InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
+ MachineBasicBlock::iterator insertpos);
private:
void EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 62d7191036..8530a0f43a 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -173,17 +173,17 @@ private:
SDValue NewIntValue) const;
SDValue ExpandFCOPYSIGN(SDNode *Node) const;
SDValue ExpandFABS(SDNode *Node) const;
- SDValue ExpandFNEG(SDNode *Node) const;
+ SDValue ExpandFNEG(SDNode *Node) const;
SDValue ExpandLegalINT_TO_FP(SDNode *Node, SDValue &Chain);
void PromoteLegalINT_TO_FP(SDNode *N, const SDLoc &dl,
SmallVectorImpl<SDValue> &Results);
void PromoteLegalFP_TO_INT(SDNode *N, const SDLoc &dl,
SmallVectorImpl<SDValue> &Results);
- SDValue PromoteLegalFP_TO_INT_SAT(SDNode *Node, const SDLoc &dl);
+ SDValue PromoteLegalFP_TO_INT_SAT(SDNode *Node, const SDLoc &dl);
SDValue ExpandBITREVERSE(SDValue Op, const SDLoc &dl);
SDValue ExpandBSWAP(SDValue Op, const SDLoc &dl);
- SDValue ExpandPARITY(SDValue Op, const SDLoc &dl);
+ SDValue ExpandPARITY(SDValue Op, const SDLoc &dl);
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
SDValue ExpandInsertToVectorThroughStack(SDValue Op);
@@ -438,16 +438,16 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
// We generally can't do this one for long doubles.
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
- SDValue Value = ST->getValue();
+ SDValue Value = ST->getValue();
MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
AAMDNodes AAInfo = ST->getAAInfo();
SDLoc dl(ST);
-
- // Don't optimise TargetConstantFP
- if (Value.getOpcode() == ISD::TargetConstantFP)
- return SDValue();
-
- if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
+
+ // Don't optimise TargetConstantFP
+ if (Value.getOpcode() == ISD::TargetConstantFP)
+ return SDValue();
+
+ if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
if (CFP->getValueType(0) == MVT::f32 &&
TLI.isTypeLegal(MVT::i32)) {
SDValue Con = DAG.getConstant(CFP->getValueAPF().
@@ -478,7 +478,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(),
ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), dl);
Hi = DAG.getStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
ST->getOriginalAlign(), MMOFlags, AAInfo);
@@ -487,7 +487,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
}
}
}
- return SDValue();
+ return SDValue();
}
void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
@@ -548,29 +548,29 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
LLVM_DEBUG(dbgs() << "Legalizing truncating store operations\n");
SDValue Value = ST->getValue();
EVT StVT = ST->getMemoryVT();
- TypeSize StWidth = StVT.getSizeInBits();
- TypeSize StSize = StVT.getStoreSizeInBits();
+ TypeSize StWidth = StVT.getSizeInBits();
+ TypeSize StSize = StVT.getStoreSizeInBits();
auto &DL = DAG.getDataLayout();
- if (StWidth != StSize) {
+ if (StWidth != StSize) {
// Promote to a byte-sized store with upper bits zero if not
// storing an integral number of bytes. For example, promote
// TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StSize.getFixedSize());
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StSize.getFixedSize());
Value = DAG.getZeroExtendInReg(Value, dl, StVT);
SDValue Result =
DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), NVT,
ST->getOriginalAlign(), MMOFlags, AAInfo);
ReplaceNode(SDValue(Node, 0), Result);
- } else if (!StVT.isVector() && !isPowerOf2_64(StWidth.getFixedSize())) {
+ } else if (!StVT.isVector() && !isPowerOf2_64(StWidth.getFixedSize())) {
// If not storing a power-of-2 number of bits, expand as two stores.
assert(!StVT.isVector() && "Unsupported truncstore!");
- unsigned StWidthBits = StWidth.getFixedSize();
- unsigned LogStWidth = Log2_32(StWidthBits);
+ unsigned StWidthBits = StWidth.getFixedSize();
+ unsigned LogStWidth = Log2_32(StWidthBits);
assert(LogStWidth < 32);
unsigned RoundWidth = 1 << LogStWidth;
- assert(RoundWidth < StWidthBits);
- unsigned ExtraWidth = StWidthBits - RoundWidth;
+ assert(RoundWidth < StWidthBits);
+ unsigned ExtraWidth = StWidthBits - RoundWidth;
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Store size not an integral number of bytes!");
@@ -587,7 +587,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
Hi = DAG.getNode(
ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(RoundWidth, dl,
@@ -727,7 +727,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
LLVM_DEBUG(dbgs() << "Legalizing extending load operation\n");
EVT SrcVT = LD->getMemoryVT();
- TypeSize SrcWidth = SrcVT.getSizeInBits();
+ TypeSize SrcWidth = SrcVT.getSizeInBits();
MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
AAMDNodes AAInfo = LD->getAAInfo();
@@ -773,15 +773,15 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Value = Result;
Chain = Ch;
- } else if (!isPowerOf2_64(SrcWidth.getKnownMinSize())) {
+ } else if (!isPowerOf2_64(SrcWidth.getKnownMinSize())) {
// If not loading a power-of-2 number of bits, expand as two loads.
assert(!SrcVT.isVector() && "Unsupported extload!");
- unsigned SrcWidthBits = SrcWidth.getFixedSize();
- unsigned LogSrcWidth = Log2_32(SrcWidthBits);
+ unsigned SrcWidthBits = SrcWidth.getFixedSize();
+ unsigned LogSrcWidth = Log2_32(SrcWidthBits);
assert(LogSrcWidth < 32);
unsigned RoundWidth = 1 << LogSrcWidth;
- assert(RoundWidth < SrcWidthBits);
- unsigned ExtraWidth = SrcWidthBits - RoundWidth;
+ assert(RoundWidth < SrcWidthBits);
+ unsigned ExtraWidth = SrcWidthBits - RoundWidth;
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Load size not an integral number of bytes!");
@@ -800,7 +800,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
@@ -828,7 +828,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
@@ -1113,18 +1113,18 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
// They'll be converted to Copy(To/From)Reg.
Action = TargetLowering::Legal;
break;
- case ISD::UBSANTRAP:
- Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
- if (Action == TargetLowering::Expand) {
- // replace ISD::UBSANTRAP with ISD::TRAP
- SDValue NewVal;
- NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
- Node->getOperand(0));
- ReplaceNode(Node, NewVal.getNode());
- LegalizeOp(NewVal.getNode());
- return;
- }
- break;
+ case ISD::UBSANTRAP:
+ Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
+ if (Action == TargetLowering::Expand) {
+ // replace ISD::UBSANTRAP with ISD::TRAP
+ SDValue NewVal;
+ NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
+ Node->getOperand(0));
+ ReplaceNode(Node, NewVal.getNode());
+ LegalizeOp(NewVal.getNode());
+ return;
+ }
+ break;
case ISD::DEBUGTRAP:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
if (Action == TargetLowering::Expand) {
@@ -1140,11 +1140,11 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
case ISD::SADDSAT:
case ISD::UADDSAT:
case ISD::SSUBSAT:
- case ISD::USUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
+ case ISD::USUBSAT:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -1184,10 +1184,10 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
Action = TLI.getOperationAction(
Node->getOpcode(), Node->getOperand(0).getValueType());
break;
- case ISD::VECREDUCE_SEQ_FADD:
- Action = TLI.getOperationAction(
- Node->getOpcode(), Node->getOperand(1).getValueType());
- break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ Action = TLI.getOperationAction(
+ Node->getOpcode(), Node->getOperand(1).getValueType());
+ break;
default:
if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
Action = TargetLowering::Legal;
@@ -1440,12 +1440,12 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
SmallVector<SDValue, 8> Stores;
unsigned TypeByteSize = MemVT.getSizeInBits() / 8;
assert(TypeByteSize > 0 && "Vector element type too small for stack store!");
-
- // If the destination vector element type of a BUILD_VECTOR is narrower than
- // the source element type, only store the bits necessary.
- bool Truncate = isa<BuildVectorSDNode>(Node) &&
- MemVT.bitsLT(Node->getOperand(0).getValueType());
-
+
+ // If the destination vector element type of a BUILD_VECTOR is narrower than
+ // the source element type, only store the bits necessary.
+ bool Truncate = isa<BuildVectorSDNode>(Node) &&
+ MemVT.bitsLT(Node->getOperand(0).getValueType());
+
// Store (in the right endianness) the elements to memory.
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
// Ignore undef elements.
@@ -1453,9 +1453,9 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
unsigned Offset = TypeByteSize*i;
- SDValue Idx = DAG.getMemBasePlusOffset(FIPtr, TypeSize::Fixed(Offset), dl);
+ SDValue Idx = DAG.getMemBasePlusOffset(FIPtr, TypeSize::Fixed(Offset), dl);
- if (Truncate)
+ if (Truncate)
Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
Node->getOperand(i), Idx,
PtrInfo.getWithOffset(Offset), MemVT));
@@ -1481,7 +1481,7 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
const SDLoc &DL,
SDValue Value) const {
EVT FloatVT = Value.getValueType();
- unsigned NumBits = FloatVT.getScalarSizeInBits();
+ unsigned NumBits = FloatVT.getScalarSizeInBits();
State.FloatVT = FloatVT;
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
// Convert to an integer of the same size.
@@ -1513,9 +1513,9 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
State.IntPointerInfo = State.FloatPointerInfo;
} else {
// Advance the pointer so that the loaded byte will contain the sign bit.
- unsigned ByteOffset = (NumBits / 8) - 1;
- IntPtr =
- DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(ByteOffset), DL);
+ unsigned ByteOffset = (NumBits / 8) - 1;
+ IntPtr =
+ DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(ByteOffset), DL);
State.IntPointerInfo = MachinePointerInfo::getFixedStack(MF, FI,
ByteOffset);
}
@@ -1523,7 +1523,7 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
State.IntPtr = IntPtr;
State.IntValue = DAG.getExtLoad(ISD::EXTLOAD, DL, LoadTy, State.Chain, IntPtr,
State.IntPointerInfo, MVT::i8);
- State.SignMask = APInt::getOneBitSet(LoadTy.getScalarSizeInBits(), 7);
+ State.SignMask = APInt::getOneBitSet(LoadTy.getScalarSizeInBits(), 7);
State.SignBit = 7;
}
@@ -1578,8 +1578,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
// Get the signbit at the right position for MagAsInt.
int ShiftAmount = SignAsInt.SignBit - MagAsInt.SignBit;
EVT ShiftVT = IntVT;
- if (SignBit.getScalarValueSizeInBits() <
- ClearedSign.getScalarValueSizeInBits()) {
+ if (SignBit.getScalarValueSizeInBits() <
+ ClearedSign.getScalarValueSizeInBits()) {
SignBit = DAG.getNode(ISD::ZERO_EXTEND, DL, MagVT, SignBit);
ShiftVT = MagVT;
}
@@ -1590,8 +1590,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, ShiftVT);
SignBit = DAG.getNode(ISD::SHL, DL, ShiftVT, SignBit, ShiftCnst);
}
- if (SignBit.getScalarValueSizeInBits() >
- ClearedSign.getScalarValueSizeInBits()) {
+ if (SignBit.getScalarValueSizeInBits() >
+ ClearedSign.getScalarValueSizeInBits()) {
SignBit = DAG.getNode(ISD::TRUNCATE, DL, MagVT, SignBit);
}
@@ -1600,22 +1600,22 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
return modifySignAsInt(MagAsInt, DL, CopiedSign);
}
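The shift/extend dance above only matters when the magnitude and sign operands
have different widths; when they match, the expansion collapses to the usual
mask-and-or on the integer image. A standalone scalar sketch of that
same-width case (hypothetical helper, f32 assumed):

    #include <cstdint>
    #include <cstring>

    // Same-width FCOPYSIGN in scalar form: clear the sign bit of the
    // magnitude, then OR in the sign bit of the sign operand.
    static float copysign_f32(float mag, float sgn) {
      uint32_t m, s;
      std::memcpy(&m, &mag, sizeof(m));
      std::memcpy(&s, &sgn, sizeof(s));
      uint32_t r = (m & 0x7fffffffu) | (s & 0x80000000u);
      float out;
      std::memcpy(&out, &r, sizeof(out));
      return out;
    }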
-SDValue SelectionDAGLegalize::ExpandFNEG(SDNode *Node) const {
- // Get the sign bit as an integer.
- SDLoc DL(Node);
- FloatSignAsInt SignAsInt;
- getSignAsIntValue(SignAsInt, DL, Node->getOperand(0));
- EVT IntVT = SignAsInt.IntValue.getValueType();
-
- // Flip the sign.
- SDValue SignMask = DAG.getConstant(SignAsInt.SignMask, DL, IntVT);
- SDValue SignFlip =
- DAG.getNode(ISD::XOR, DL, IntVT, SignAsInt.IntValue, SignMask);
-
- // Convert back to float.
- return modifySignAsInt(SignAsInt, DL, SignFlip);
-}
-
+SDValue SelectionDAGLegalize::ExpandFNEG(SDNode *Node) const {
+ // Get the sign bit as an integer.
+ SDLoc DL(Node);
+ FloatSignAsInt SignAsInt;
+ getSignAsIntValue(SignAsInt, DL, Node->getOperand(0));
+ EVT IntVT = SignAsInt.IntValue.getValueType();
+
+ // Flip the sign.
+ SDValue SignMask = DAG.getConstant(SignAsInt.SignMask, DL, IntVT);
+ SDValue SignFlip =
+ DAG.getNode(ISD::XOR, DL, IntVT, SignAsInt.IntValue, SignMask);
+
+ // Convert back to float.
+ return modifySignAsInt(SignAsInt, DL, SignFlip);
+}
+
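ExpandFNEG above is the integer-domain sign flip; the SignMask computed by
getSignAsIntValue is a one-bit mask at the sign position. A minimal scalar
sketch of the same transform (f32 assumed, helper hypothetical):

    #include <cstdint>
    #include <cstring>

    // FNEG via integers: XOR the sign bit, which is exactly the XOR node
    // the expansion builds. No FP exceptions; the value is otherwise
    // untouched.
    static float fneg_f32(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= 0x80000000u;   // flip only the sign bit
      float out;
      std::memcpy(&out, &bits, sizeof(out));
      return out;
    }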
SDValue SelectionDAGLegalize::ExpandFABS(SDNode *Node) const {
SDLoc DL(Node);
SDValue Value = Node->getOperand(0);
@@ -1639,7 +1639,7 @@ SDValue SelectionDAGLegalize::ExpandFABS(SDNode *Node) const {
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
SmallVectorImpl<SDValue> &Results) {
- Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
+ Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
SDLoc dl(Node);
@@ -1733,36 +1733,36 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(
unsigned Opc = 0;
switch (CCCode) {
default: llvm_unreachable("Don't know how to expand this condition!");
- case ISD::SETUO:
- if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
- CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR;
- break;
- }
- assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
-           "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
- NeedInvert = true;
- LLVM_FALLTHROUGH;
+ case ISD::SETUO:
+ if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
+ CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR;
+ break;
+ }
+ assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
+           "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
+ NeedInvert = true;
+ LLVM_FALLTHROUGH;
case ISD::SETO:
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT)
&& "If SETO is expanded, SETOEQ must be legal!");
CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
- case ISD::SETONE:
- case ISD::SETUEQ:
- // If the SETUO or SETO CC isn't legal, we might be able to use
- // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one
- // of SETOGT/SETOLT to be legal, the other can be emulated by swapping
- // the operands.
- CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
- if (!TLI.isCondCodeLegal(CC2, OpVT) &&
- (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) ||
- TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) {
- CC1 = ISD::SETOGT;
- CC2 = ISD::SETOLT;
- Opc = ISD::OR;
- NeedInvert = ((unsigned)CCCode & 0x8U);
- break;
- }
- LLVM_FALLTHROUGH;
+ case ISD::SETONE:
+ case ISD::SETUEQ:
+ // If the SETUO or SETO CC isn't legal, we might be able to use
+ // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one
+ // of SETOGT/SETOLT to be legal, the other can be emulated by swapping
+ // the operands.
+ CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
+ if (!TLI.isCondCodeLegal(CC2, OpVT) &&
+ (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) ||
+ TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) {
+ CC1 = ISD::SETOGT;
+ CC2 = ISD::SETOLT;
+ Opc = ISD::OR;
+ NeedInvert = ((unsigned)CCCode & 0x8U);
+ break;
+ }
+ LLVM_FALLTHROUGH;
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
@@ -1799,16 +1799,16 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(
if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unordered operation,
// then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
- SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain,
- IsSignaling);
- SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain,
- IsSignaling);
+ SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain,
+ IsSignaling);
+ SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain,
+ IsSignaling);
} else {
// Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
- SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain,
- IsSignaling);
- SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain,
- IsSignaling);
+ SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain,
+ IsSignaling);
+ SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain,
+ IsSignaling);
}
if (Chain)
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
@@ -1834,23 +1834,23 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
EVT DestVT, const SDLoc &dl,
SDValue Chain) {
- unsigned SrcSize = SrcOp.getValueSizeInBits();
- unsigned SlotSize = SlotVT.getSizeInBits();
- unsigned DestSize = DestVT.getSizeInBits();
- Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
- Align DestAlign = DAG.getDataLayout().getPrefTypeAlign(DestType);
-
- // Don't convert with stack if the load/store is expensive.
- if ((SrcSize > SlotSize &&
- !TLI.isTruncStoreLegalOrCustom(SrcOp.getValueType(), SlotVT)) ||
- (SlotSize < DestSize &&
- !TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, DestVT, SlotVT)))
- return SDValue();
-
+ unsigned SrcSize = SrcOp.getValueSizeInBits();
+ unsigned SlotSize = SlotVT.getSizeInBits();
+ unsigned DestSize = DestVT.getSizeInBits();
+ Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
+ Align DestAlign = DAG.getDataLayout().getPrefTypeAlign(DestType);
+
+ // Don't convert with stack if the load/store is expensive.
+ if ((SrcSize > SlotSize &&
+ !TLI.isTruncStoreLegalOrCustom(SrcOp.getValueType(), SlotVT)) ||
+ (SlotSize < DestSize &&
+ !TLI.isLoadExtLegalOrCustom(ISD::EXTLOAD, DestVT, SlotVT)))
+ return SDValue();
+
// Create the stack frame object.
- Align SrcAlign = DAG.getDataLayout().getPrefTypeAlign(
+ Align SrcAlign = DAG.getDataLayout().getPrefTypeAlign(
SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
- SDValue FIPtr = DAG.CreateStackTemporary(SlotVT.getStoreSize(), SrcAlign);
+ SDValue FIPtr = DAG.CreateStackTemporary(SlotVT.getStoreSize(), SrcAlign);
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
int SPFI = StackPtrFI->getIndex();
@@ -1861,7 +1861,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
// later than DestVT.
SDValue Store;
- if (SrcSize > SlotSize)
+ if (SrcSize > SlotSize)
Store = DAG.getTruncStore(Chain, dl, SrcOp, FIPtr, PtrInfo,
SlotVT, SrcAlign);
else {
@@ -1873,7 +1873,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
// Result is a load from the stack slot.
if (SlotSize == DestSize)
return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, DestAlign);
-
+
assert(SlotSize < DestSize && "Unknown extension!");
return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, PtrInfo, SlotVT,
DestAlign);
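EmitStackConvert's store-then-reload is the node-level analogue of a type pun
through memory: spill as the slot type, reload as the destination type,
truncating or extending when the sizes differ. For the same-size case
(SlotSize == DestSize, e.g. a BITCAST), a plain scalar sketch:

    #include <cstdint>
    #include <cstring>

    // Same-size stack convert: the store/load pair through the stack slot
    // is what memcpy expresses portably in scalar code.
    static uint32_t f32_bits(float f) {
      uint32_t out;
      std::memcpy(&out, &f, sizeof(out));
      return out;
    }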
@@ -2194,7 +2194,7 @@ void SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
if (Node->isStrictFPOpcode()) {
EVT RetVT = Node->getValueType(0);
- SmallVector<SDValue, 4> Ops(drop_begin(Node->ops()));
+ SmallVector<SDValue, 4> Ops(drop_begin(Node->ops()));
TargetLowering::MakeLibCallOptions CallOptions;
// FIXME: This doesn't support tail calls.
std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT,
@@ -2444,11 +2444,11 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
// TODO: Should any fast-math-flags be set for the created nodes?
LLVM_DEBUG(dbgs() << "Legalizing INT_TO_FP\n");
- if (SrcVT == MVT::i32 && TLI.isTypeLegal(MVT::f64) &&
- (DestVT.bitsLE(MVT::f64) ||
- TLI.isOperationLegal(Node->isStrictFPOpcode() ? ISD::STRICT_FP_EXTEND
- : ISD::FP_EXTEND,
- DestVT))) {
+ if (SrcVT == MVT::i32 && TLI.isTypeLegal(MVT::f64) &&
+ (DestVT.bitsLE(MVT::f64) ||
+ TLI.isOperationLegal(Node->isStrictFPOpcode() ? ISD::STRICT_FP_EXTEND
+ : ISD::FP_EXTEND,
+ DestVT))) {
LLVM_DEBUG(dbgs() << "32-bit [signed|unsigned] integer to float/double "
"expansion\n");
@@ -2475,7 +2475,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
SDValue Store1 = DAG.getStore(MemChain, dl, Lo, StackSlot,
MachinePointerInfo());
// Store the hi of the constructed double.
- SDValue HiPtr = DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
+ SDValue HiPtr = DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
SDValue Store2 =
DAG.getStore(MemChain, dl, Hi, HiPtr, MachinePointerInfo());
MemChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
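The Lo/Hi stores above assemble the classic magic-number double for a 32-bit
integer: the integer becomes the low word of 2^52, and subtracting the bias
afterwards leaves the exact value. A scalar sketch of the unsigned flavor
(the signed path applies an extra sign-bit bias, not shown here):

    #include <cstdint>
    #include <cstring>

    // u32 -> f64 via the 2^52 trick: 0x4330000000000000 is 2^52, whose
    // 52-bit mantissa holds any 32-bit integer exactly.
    static double u32_to_f64(uint32_t x) {
      uint64_t bits = 0x4330000000000000ULL | x;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d - 4503599627370496.0;   // subtract 2^52
    }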
@@ -2511,23 +2511,23 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
return Result;
}
- if (isSigned)
- return SDValue();
-
+ if (isSigned)
+ return SDValue();
+
// TODO: Generalize this for use with other types.
- if (((SrcVT == MVT::i32 || SrcVT == MVT::i64) && DestVT == MVT::f32) ||
- (SrcVT == MVT::i64 && DestVT == MVT::f64)) {
- LLVM_DEBUG(dbgs() << "Converting unsigned i32/i64 to f32/f64\n");
+ if (((SrcVT == MVT::i32 || SrcVT == MVT::i64) && DestVT == MVT::f32) ||
+ (SrcVT == MVT::i64 && DestVT == MVT::f64)) {
+ LLVM_DEBUG(dbgs() << "Converting unsigned i32/i64 to f32/f64\n");
// For unsigned conversions, convert them to signed conversions using the
// algorithm from the x86_64 __floatundisf in compiler_rt. That method
// should be valid for i32->f32 as well.
- // More generally this transform should be valid if there are 3 more bits
- // in the integer type than the significand. Rounding uses the first bit
- // after the width of the significand and the OR of all bits after that. So
- // we need to be able to OR the shifted out bit into one of the bits that
- // participate in the OR.
-
+ // More generally this transform should be valid if there are 3 more bits
+ // in the integer type than the significand. Rounding uses the first bit
+ // after the width of the significand and the OR of all bits after that. So
+ // we need to be able to OR the shifted out bit into one of the bits that
+ // participate in the OR.
+
// TODO: This really should be implemented using a branch rather than a
// select. We happen to get lucky and machinesink does the right
// thing most of the time. This would be a good candidate for a
@@ -2571,11 +2571,11 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
return DAG.getSelect(dl, DestVT, SignBitTest, Slow, Fast);
}
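The Slow/Fast select above is the compiler-rt __floatundisf idea from the
comment: when the sign-as-signed test fails, halve the value while keeping a
sticky bit so rounding still sees the shifted-out bit, convert signed, then
double. A scalar sketch (i64 -> f32 assumed):

    #include <cstdint>

    // Unsigned -> float through a signed convert. The (x & 1) OR keeps the
    // rounding decision correct after the right shift.
    static float u64_to_f32(uint64_t x) {
      if ((int64_t)x >= 0)
        return (float)(int64_t)x;             // Fast: fits as signed
      uint64_t half = (x >> 1) | (x & 1);     // Slow: halve with sticky bit
      float f = (float)(int64_t)half;
      return f + f;                           // double back
    }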
- // Don't expand it if there isn't cheap fadd.
- if (!TLI.isOperationLegalOrCustom(
- Node->isStrictFPOpcode() ? ISD::STRICT_FADD : ISD::FADD, DestVT))
- return SDValue();
-
+ // Don't expand it if there isn't cheap fadd.
+ if (!TLI.isOperationLegalOrCustom(
+ Node->isStrictFPOpcode() ? ISD::STRICT_FADD : ISD::FADD, DestVT))
+ return SDValue();
+
// The following optimization is valid only if every value in SrcVT (when
// treated as signed) is representable in DestVT. Check that the mantissa
  // size of DestVT is >= the number of bits in SrcVT - 1.
@@ -2602,8 +2602,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
// offset depending on the data type.
uint64_t FF;
switch (SrcVT.getSimpleVT().SimpleTy) {
- default:
- return SDValue();
+ default:
+ return SDValue();
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
@@ -2758,30 +2758,30 @@ void SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDNode *N, const SDLoc &dl,
Results.push_back(Operation.getValue(1));
}
-/// Promote FP_TO_*INT_SAT operation to a larger result type. At this point
-/// the result and operand types are legal and there must be a legal
-/// FP_TO_*INT_SAT operation for a larger result type.
-SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT_SAT(SDNode *Node,
- const SDLoc &dl) {
- unsigned Opcode = Node->getOpcode();
-
- // Scan for the appropriate larger type to use.
- EVT NewOutTy = Node->getValueType(0);
- while (true) {
- NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy + 1);
- assert(NewOutTy.isInteger() && "Ran out of possibilities!");
-
- if (TLI.isOperationLegalOrCustom(Opcode, NewOutTy))
- break;
- }
-
- // Saturation width is determined by second operand, so we don't have to
- // perform any fixup and can directly truncate the result.
- SDValue Result = DAG.getNode(Opcode, dl, NewOutTy, Node->getOperand(0),
- Node->getOperand(1));
- return DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Result);
-}
-
+/// Promote FP_TO_*INT_SAT operation to a larger result type. At this point
+/// the result and operand types are legal and there must be a legal
+/// FP_TO_*INT_SAT operation for a larger result type.
+SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT_SAT(SDNode *Node,
+ const SDLoc &dl) {
+ unsigned Opcode = Node->getOpcode();
+
+ // Scan for the appropriate larger type to use.
+ EVT NewOutTy = Node->getValueType(0);
+ while (true) {
+ NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy + 1);
+ assert(NewOutTy.isInteger() && "Ran out of possibilities!");
+
+ if (TLI.isOperationLegalOrCustom(Opcode, NewOutTy))
+ break;
+ }
+
+ // Saturation width is determined by second operand, so we don't have to
+ // perform any fixup and can directly truncate the result.
+ SDValue Result = DAG.getNode(Opcode, dl, NewOutTy, Node->getOperand(0),
+ Node->getOperand(1));
+ return DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Result);
+}
+
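The bare TRUNCATE at the end is safe because FP_TO_*INT_SAT clamps to the
range implied by its saturation-width operand, not by the result type, so the
promoted result already fits. A scalar model of the saturating semantics
(f32 -> i8 computed through i32; helper hypothetical):

    #include <cstdint>

    // fptosi.sat-style semantics: NaN maps to 0, out-of-range input clamps.
    // Doing the clamp in i32 and then truncating mirrors the promoted node
    // followed by TRUNCATE.
    static int8_t fptosi_sat_i8(float f) {
      if (f != f) return 0;           // NaN -> 0
      if (f <= -128.0f) return -128;  // clamp low
      if (f >= 127.0f) return 127;    // clamp high
      return (int8_t)(int32_t)f;      // in range: plain conversion
    }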
/// Legalize a BITREVERSE scalar/vector operation as a series of mask + shifts.
SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
EVT VT = Op.getValueType();
@@ -2898,28 +2898,28 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, const SDLoc &dl) {
}
}
-/// Open code the operations for PARITY of the specified value.
-SDValue SelectionDAGLegalize::ExpandPARITY(SDValue Op, const SDLoc &dl) {
- EVT VT = Op.getValueType();
- EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
- unsigned Sz = VT.getScalarSizeInBits();
-
- // If CTPOP is legal, use it. Otherwise use shifts and xor.
- SDValue Result;
- if (TLI.isOperationLegal(ISD::CTPOP, VT)) {
- Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
- } else {
- Result = Op;
- for (unsigned i = Log2_32_Ceil(Sz); i != 0;) {
- SDValue Shift = DAG.getNode(ISD::SRL, dl, VT, Result,
- DAG.getConstant(1ULL << (--i), dl, ShVT));
- Result = DAG.getNode(ISD::XOR, dl, VT, Result, Shift);
- }
- }
-
- return DAG.getNode(ISD::AND, dl, VT, Result, DAG.getConstant(1, dl, VT));
-}
-
+/// Open code the operations for PARITY of the specified value.
+SDValue SelectionDAGLegalize::ExpandPARITY(SDValue Op, const SDLoc &dl) {
+ EVT VT = Op.getValueType();
+ EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
+ unsigned Sz = VT.getScalarSizeInBits();
+
+ // If CTPOP is legal, use it. Otherwise use shifts and xor.
+ SDValue Result;
+ if (TLI.isOperationLegal(ISD::CTPOP, VT)) {
+ Result = DAG.getNode(ISD::CTPOP, dl, VT, Op);
+ } else {
+ Result = Op;
+ for (unsigned i = Log2_32_Ceil(Sz); i != 0;) {
+ SDValue Shift = DAG.getNode(ISD::SRL, dl, VT, Result,
+ DAG.getConstant(1ULL << (--i), dl, ShVT));
+ Result = DAG.getNode(ISD::XOR, dl, VT, Result, Shift);
+ }
+ }
+
+ return DAG.getNode(ISD::AND, dl, VT, Result, DAG.getConstant(1, dl, VT));
+}
+
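The fallback loop above XOR-folds the value onto bit 0 in Log2_32_Ceil(Sz)
steps. Unrolled for a 32-bit scalar, the same reduction reads:

    #include <cstdint>

    // Parity without CTPOP: each step folds the upper half's parity into
    // the lower half; after five steps bit 0 holds the parity of all 32
    // bits, and the final AND masks it out.
    static uint32_t parity32(uint32_t x) {
      x ^= x >> 16;
      x ^= x >> 8;
      x ^= x >> 4;
      x ^= x >> 2;
      x ^= x >> 1;
      return x & 1;
    }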
bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
LLVM_DEBUG(dbgs() << "Trying to expand node\n");
SmallVector<SDValue, 8> Results;
@@ -2951,9 +2951,9 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::BSWAP:
Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
break;
- case ISD::PARITY:
- Results.push_back(ExpandPARITY(Node->getOperand(0), dl));
- break;
+ case ISD::PARITY:
+ Results.push_back(ExpandPARITY(Node->getOperand(0), dl));
+ break;
case ISD::FRAMEADDR:
case ISD::RETURNADDR:
case ISD::FRAME_TO_ARGS_OFFSET:
@@ -3098,19 +3098,19 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
break;
// We fall back to use stack operation when the FP_ROUND operation
// isn't available.
- if ((Tmp1 = EmitStackConvert(Node->getOperand(1), Node->getValueType(0),
- Node->getValueType(0), dl,
- Node->getOperand(0)))) {
- ReplaceNode(Node, Tmp1.getNode());
- LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_ROUND node\n");
- return true;
- }
- break;
+ if ((Tmp1 = EmitStackConvert(Node->getOperand(1), Node->getValueType(0),
+ Node->getValueType(0), dl,
+ Node->getOperand(0)))) {
+ ReplaceNode(Node, Tmp1.getNode());
+ LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_ROUND node\n");
+ return true;
+ }
+ break;
case ISD::FP_ROUND:
case ISD::BITCAST:
- if ((Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
- Node->getValueType(0), dl)))
- Results.push_back(Tmp1);
+ if ((Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
+ Node->getValueType(0), dl)))
+ Results.push_back(Tmp1);
break;
case ISD::STRICT_FP_EXTEND:
// When strict mode is enforced we can't do expansion because it
@@ -3125,19 +3125,19 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
break;
// We fall back to use stack operation when the FP_EXTEND operation
// isn't available.
- if ((Tmp1 = EmitStackConvert(
- Node->getOperand(1), Node->getOperand(1).getValueType(),
- Node->getValueType(0), dl, Node->getOperand(0)))) {
- ReplaceNode(Node, Tmp1.getNode());
- LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_EXTEND node\n");
- return true;
- }
- break;
+ if ((Tmp1 = EmitStackConvert(
+ Node->getOperand(1), Node->getOperand(1).getValueType(),
+ Node->getValueType(0), dl, Node->getOperand(0)))) {
+ ReplaceNode(Node, Tmp1.getNode());
+ LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_EXTEND node\n");
+ return true;
+ }
+ break;
case ISD::FP_EXTEND:
- if ((Tmp1 = EmitStackConvert(Node->getOperand(0),
- Node->getOperand(0).getValueType(),
- Node->getValueType(0), dl)))
- Results.push_back(Tmp1);
+ if ((Tmp1 = EmitStackConvert(Node->getOperand(0),
+ Node->getOperand(0).getValueType(),
+ Node->getValueType(0), dl)))
+ Results.push_back(Tmp1);
break;
case ISD::SIGN_EXTEND_INREG: {
EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
@@ -3182,11 +3182,11 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
LLVM_FALLTHROUGH;
case ISD::SINT_TO_FP:
case ISD::STRICT_SINT_TO_FP:
- if ((Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2))) {
- Results.push_back(Tmp1);
- if (Node->isStrictFPOpcode())
- Results.push_back(Tmp2);
- }
+ if ((Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2))) {
+ Results.push_back(Tmp1);
+ if (Node->isStrictFPOpcode())
+ Results.push_back(Tmp2);
+ }
break;
case ISD::FP_TO_SINT:
if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG))
@@ -3213,10 +3213,10 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
return true;
}
break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Results.push_back(TLI.expandFP_TO_INT_SAT(Node, DAG));
- break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Results.push_back(TLI.expandFP_TO_INT_SAT(Node, DAG));
+ break;
case ISD::VAARG:
Results.push_back(DAG.expandVAArg(Node));
Results.push_back(Results[0].getValue(1));
@@ -3345,7 +3345,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::STACKSAVE:
// Expand to CopyFromReg if the target set
// StackPointerRegisterToSaveRestore.
- if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
+ if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
Node->getValueType(0)));
Results.push_back(Results[0].getValue(1));
@@ -3357,7 +3357,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::STACKRESTORE:
// Expand to CopyToReg if the target set
// StackPointerRegisterToSaveRestore.
- if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
+ if (Register SP = TLI.getStackPointerRegisterToSaveRestore()) {
Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
Node->getOperand(1)));
} else {
@@ -3372,7 +3372,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(ExpandFCOPYSIGN(Node));
break;
case ISD::FNEG:
- Results.push_back(ExpandFNEG(Node));
+ Results.push_back(ExpandFNEG(Node));
break;
case ISD::FABS:
Results.push_back(ExpandFABS(Node));
@@ -3468,7 +3468,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Check to see if this FP immediate is already legal.
// If this is a legal constant, turn it into a TargetConstantFP node.
if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0),
- DAG.shouldOptForSize()))
+ DAG.shouldOptForSize()))
Results.push_back(ExpandConstantFP(CFP, true));
break;
}
@@ -3547,7 +3547,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SmallVector<SDValue, 4> Halves;
EVT HalfType = EVT(VT).getHalfSizedIntegerVT(*DAG.getContext());
assert(TLI.isTypeLegal(HalfType));
- if (TLI.expandMUL_LOHI(Node->getOpcode(), VT, dl, LHS, RHS, Halves,
+ if (TLI.expandMUL_LOHI(Node->getOpcode(), VT, dl, LHS, RHS, Halves,
HalfType, DAG,
TargetLowering::MulExpansionKind::Always)) {
for (unsigned i = 0; i < 2; ++i) {
@@ -3616,7 +3616,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
break;
case ISD::ROTL:
case ISD::ROTR:
- if (TLI.expandROT(Node, true /*AllowVectorOps*/, Tmp1, DAG))
+ if (TLI.expandROT(Node, true /*AllowVectorOps*/, Tmp1, DAG))
Results.push_back(Tmp1);
break;
case ISD::SADDSAT:
@@ -3625,10 +3625,10 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::USUBSAT:
Results.push_back(TLI.expandAddSubSat(Node, DAG));
break;
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
- Results.push_back(TLI.expandShlSat(Node, DAG));
- break;
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
+ Results.push_back(TLI.expandShlSat(Node, DAG));
+ break;
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
case ISD::UMULFIX:
@@ -3969,13 +3969,13 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC
// node.
if (Tmp4.getNode()) {
- assert(!NeedInvert && "Don't know how to invert BR_CC!");
-
+ assert(!NeedInvert && "Don't know how to invert BR_CC!");
+
Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1,
Tmp4, Tmp2, Tmp3, Node->getOperand(4));
} else {
Tmp3 = DAG.getConstant(0, dl, Tmp2.getValueType());
- Tmp4 = DAG.getCondCode(NeedInvert ? ISD::SETEQ : ISD::SETNE);
+ Tmp4 = DAG.getCondCode(NeedInvert ? ISD::SETEQ : ISD::SETNE);
Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4,
Tmp2, Tmp3, Node->getOperand(4));
}
@@ -4056,27 +4056,27 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
== TargetLowering::Legal)
return true;
break;
- case ISD::STRICT_FSUB: {
- if (TLI.getStrictFPOperationAction(
- ISD::STRICT_FSUB, Node->getValueType(0)) == TargetLowering::Legal)
- return true;
- if (TLI.getStrictFPOperationAction(
- ISD::STRICT_FADD, Node->getValueType(0)) != TargetLowering::Legal)
- break;
-
- EVT VT = Node->getValueType(0);
- const SDNodeFlags Flags = Node->getFlags();
- SDValue Neg = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(2), Flags);
- SDValue Fadd = DAG.getNode(ISD::STRICT_FADD, dl, Node->getVTList(),
- {Node->getOperand(0), Node->getOperand(1), Neg},
- Flags);
-
- Results.push_back(Fadd);
- Results.push_back(Fadd.getValue(1));
- break;
- }
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
+ case ISD::STRICT_FSUB: {
+ if (TLI.getStrictFPOperationAction(
+ ISD::STRICT_FSUB, Node->getValueType(0)) == TargetLowering::Legal)
+ return true;
+ if (TLI.getStrictFPOperationAction(
+ ISD::STRICT_FADD, Node->getValueType(0)) != TargetLowering::Legal)
+ break;
+
+ EVT VT = Node->getValueType(0);
+ const SDNodeFlags Flags = Node->getFlags();
+ SDValue Neg = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(2), Flags);
+ SDValue Fadd = DAG.getNode(ISD::STRICT_FADD, dl, Node->getVTList(),
+ {Node->getOperand(0), Node->getOperand(1), Neg},
+ Flags);
+
+ Results.push_back(Fadd);
+ Results.push_back(Fadd.getValue(1));
+ break;
+ }
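The STRICT_FSUB fallback above leans on FNEG being exact: a - b and a + (-b)
are bit-identical under IEEE-754 and raise the same exceptions, so strict
semantics survive the rewrite. In scalar terms:

    // Negation only flips the sign bit, so rewriting the subtract as an
    // add of the negation is exception-free and value-preserving.
    static double fsub_via_fadd(double a, double b) {
      return a + (-b);
    }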
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
case ISD::STRICT_LRINT:
case ISD::STRICT_LLRINT:
case ISD::STRICT_LROUND:
@@ -4145,23 +4145,23 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_CMP_SWAP: {
MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
- AtomicOrdering Order = cast<AtomicSDNode>(Node)->getOrdering();
- RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(Opc, Order, VT);
+ AtomicOrdering Order = cast<AtomicSDNode>(Node)->getOrdering();
+ RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(Opc, Order, VT);
EVT RetVT = Node->getValueType(0);
TargetLowering::MakeLibCallOptions CallOptions;
- SmallVector<SDValue, 4> Ops;
- if (TLI.getLibcallName(LC)) {
-      // If an outline atomic is available, prepare its arguments and expand.
- Ops.append(Node->op_begin() + 2, Node->op_end());
- Ops.push_back(Node->getOperand(1));
-
- } else {
- LC = RTLIB::getSYNC(Opc, VT);
- assert(LC != RTLIB::UNKNOWN_LIBCALL &&
- "Unexpected atomic op or value type!");
- // Arguments for expansion to sync libcall
- Ops.append(Node->op_begin() + 1, Node->op_end());
- }
+ SmallVector<SDValue, 4> Ops;
+ if (TLI.getLibcallName(LC)) {
+      // If an outline atomic is available, prepare its arguments and expand.
+ Ops.append(Node->op_begin() + 2, Node->op_end());
+ Ops.push_back(Node->getOperand(1));
+
+ } else {
+ LC = RTLIB::getSYNC(Opc, VT);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL &&
+ "Unexpected atomic op or value type!");
+ // Arguments for expansion to sync libcall
+ Ops.append(Node->op_begin() + 1, Node->op_end());
+ }
std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT,
Ops, CallOptions,
SDLoc(Node),
@@ -4409,131 +4409,131 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
Results.push_back(ExpandLibCall(LC, Node, false));
break;
}
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP: {
- // TODO - Common the code with DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP
- bool IsStrict = Node->isStrictFPOpcode();
- bool Signed = Node->getOpcode() == ISD::SINT_TO_FP ||
- Node->getOpcode() == ISD::STRICT_SINT_TO_FP;
- EVT SVT = Node->getOperand(IsStrict ? 1 : 0).getValueType();
- EVT RVT = Node->getValueType(0);
- EVT NVT = EVT();
- SDLoc dl(Node);
-
- // Even if the input is legal, no libcall may exactly match, eg. we don't
- // have i1 -> fp conversions. So, it needs to be promoted to a larger type,
- // eg: i13 -> fp. Then, look for an appropriate libcall.
- RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
- for (unsigned t = MVT::FIRST_INTEGER_VALUETYPE;
- t <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
- ++t) {
- NVT = (MVT::SimpleValueType)t;
-      // The source needs to be big enough to hold the operand.
- if (NVT.bitsGE(SVT))
- LC = Signed ? RTLIB::getSINTTOFP(NVT, RVT)
- : RTLIB::getUINTTOFP(NVT, RVT);
- }
- assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
-
- SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
- // Sign/zero extend the argument if the libcall takes a larger type.
- SDValue Op = DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
- NVT, Node->getOperand(IsStrict ? 1 : 0));
- TargetLowering::MakeLibCallOptions CallOptions;
- CallOptions.setSExt(Signed);
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, RVT, Op, CallOptions, dl, Chain);
- Results.push_back(Tmp.first);
- if (IsStrict)
- Results.push_back(Tmp.second);
- break;
- }
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT: {
- // TODO - Common the code with DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT.
- bool IsStrict = Node->isStrictFPOpcode();
- bool Signed = Node->getOpcode() == ISD::FP_TO_SINT ||
- Node->getOpcode() == ISD::STRICT_FP_TO_SINT;
-
- SDValue Op = Node->getOperand(IsStrict ? 1 : 0);
- EVT SVT = Op.getValueType();
- EVT RVT = Node->getValueType(0);
- EVT NVT = EVT();
- SDLoc dl(Node);
-
- // Even if the result is legal, no libcall may exactly match, eg. we don't
- // have fp -> i1 conversions. So, it needs to be promoted to a larger type,
- // eg: fp -> i32. Then, look for an appropriate libcall.
- RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
- for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
- IntVT <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
- ++IntVT) {
- NVT = (MVT::SimpleValueType)IntVT;
-      // The type needs to be big enough to hold the result.
- if (NVT.bitsGE(RVT))
- LC = Signed ? RTLIB::getFPTOSINT(SVT, NVT)
- : RTLIB::getFPTOUINT(SVT, NVT);
- }
- assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
-
- SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
- TargetLowering::MakeLibCallOptions CallOptions;
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, dl, Chain);
-
- // Truncate the result if the libcall returns a larger type.
- Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, RVT, Tmp.first));
- if (IsStrict)
- Results.push_back(Tmp.second);
- break;
- }
-
- case ISD::FP_ROUND:
- case ISD::STRICT_FP_ROUND: {
- // X = FP_ROUND(Y, TRUNC)
- // TRUNC is a flag, which is always an integer that is zero or one.
-    // If TRUNC is 0, this is a normal rounding; if it is 1, this FP_ROUND
-    // is known to not change the value of Y.
-    // We can only expand it into a libcall if TRUNC is 0.
- bool IsStrict = Node->isStrictFPOpcode();
- SDValue Op = Node->getOperand(IsStrict ? 1 : 0);
- SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
- EVT VT = Node->getValueType(0);
- assert(cast<ConstantSDNode>(Node->getOperand(IsStrict ? 2 : 1))
- ->isNullValue() &&
- "Unable to expand as libcall if it is not normal rounding");
-
- RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), VT);
- assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
-
- TargetLowering::MakeLibCallOptions CallOptions;
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, VT, Op, CallOptions, SDLoc(Node), Chain);
- Results.push_back(Tmp.first);
- if (IsStrict)
- Results.push_back(Tmp.second);
- break;
- }
- case ISD::FP_EXTEND: {
- Results.push_back(
- ExpandLibCall(RTLIB::getFPEXT(Node->getOperand(0).getValueType(),
- Node->getValueType(0)),
- Node, false));
- break;
- }
- case ISD::STRICT_FP_EXTEND:
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP: {
+ // TODO - Common the code with DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP
+ bool IsStrict = Node->isStrictFPOpcode();
+ bool Signed = Node->getOpcode() == ISD::SINT_TO_FP ||
+ Node->getOpcode() == ISD::STRICT_SINT_TO_FP;
+ EVT SVT = Node->getOperand(IsStrict ? 1 : 0).getValueType();
+ EVT RVT = Node->getValueType(0);
+ EVT NVT = EVT();
+ SDLoc dl(Node);
+
+ // Even if the input is legal, no libcall may exactly match, eg. we don't
+ // have i1 -> fp conversions. So, it needs to be promoted to a larger type,
+ // eg: i13 -> fp. Then, look for an appropriate libcall.
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ for (unsigned t = MVT::FIRST_INTEGER_VALUETYPE;
+ t <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
+ ++t) {
+ NVT = (MVT::SimpleValueType)t;
+      // The source needs to be big enough to hold the operand.
+ if (NVT.bitsGE(SVT))
+ LC = Signed ? RTLIB::getSINTTOFP(NVT, RVT)
+ : RTLIB::getUINTTOFP(NVT, RVT);
+ }
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
+
+ SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
+ // Sign/zero extend the argument if the libcall takes a larger type.
+ SDValue Op = DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
+ NVT, Node->getOperand(IsStrict ? 1 : 0));
+ TargetLowering::MakeLibCallOptions CallOptions;
+ CallOptions.setSExt(Signed);
+ std::pair<SDValue, SDValue> Tmp =
+ TLI.makeLibCall(DAG, LC, RVT, Op, CallOptions, dl, Chain);
+ Results.push_back(Tmp.first);
+ if (IsStrict)
+ Results.push_back(Tmp.second);
+ break;
+ }
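The scan in this case (and in FP_TO_XINT below) walks the simple integer types
from narrowest to widest and keeps the first one that is both wide enough and
actually has a conversion libcall. A plain-C++ model of that selection, with
hasLibcall standing in for the RTLIB lookups:

    // Pick the narrowest standard width that fits srcBits and has a
    // libcall; e.g. an i13 operand typically lands on the 32-bit variant
    // because no i16 conversion routine exists.
    static unsigned pickLibcallWidth(unsigned srcBits,
                                     bool (*hasLibcall)(unsigned)) {
      for (unsigned w = 8; w <= 128; w *= 2)
        if (w >= srcBits && hasLibcall(w))
          return w;
      return 0;   // caller asserts this is unreachable
    }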
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT: {
+ // TODO - Common the code with DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT.
+ bool IsStrict = Node->isStrictFPOpcode();
+ bool Signed = Node->getOpcode() == ISD::FP_TO_SINT ||
+ Node->getOpcode() == ISD::STRICT_FP_TO_SINT;
+
+ SDValue Op = Node->getOperand(IsStrict ? 1 : 0);
+ EVT SVT = Op.getValueType();
+ EVT RVT = Node->getValueType(0);
+ EVT NVT = EVT();
+ SDLoc dl(Node);
+
+ // Even if the result is legal, no libcall may exactly match, eg. we don't
+ // have fp -> i1 conversions. So, it needs to be promoted to a larger type,
+ // eg: fp -> i32. Then, look for an appropriate libcall.
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
+ IntVT <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
+ ++IntVT) {
+ NVT = (MVT::SimpleValueType)IntVT;
+      // The type needs to be big enough to hold the result.
+ if (NVT.bitsGE(RVT))
+ LC = Signed ? RTLIB::getFPTOSINT(SVT, NVT)
+ : RTLIB::getFPTOUINT(SVT, NVT);
+ }
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
+
+ SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
+ TargetLowering::MakeLibCallOptions CallOptions;
+ std::pair<SDValue, SDValue> Tmp =
+ TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, dl, Chain);
+
+ // Truncate the result if the libcall returns a larger type.
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, RVT, Tmp.first));
+ if (IsStrict)
+ Results.push_back(Tmp.second);
+ break;
+ }
+
+ case ISD::FP_ROUND:
+ case ISD::STRICT_FP_ROUND: {
+ // X = FP_ROUND(Y, TRUNC)
+ // TRUNC is a flag, which is always an integer that is zero or one.
+    // If TRUNC is 0, this is a normal rounding; if it is 1, this FP_ROUND
+    // is known to not change the value of Y.
+    // We can only expand it into a libcall if TRUNC is 0.
+ bool IsStrict = Node->isStrictFPOpcode();
+ SDValue Op = Node->getOperand(IsStrict ? 1 : 0);
+ SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue();
+ EVT VT = Node->getValueType(0);
+ assert(cast<ConstantSDNode>(Node->getOperand(IsStrict ? 2 : 1))
+ ->isNullValue() &&
+ "Unable to expand as libcall if it is not normal rounding");
+
+ RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), VT);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
+
+ TargetLowering::MakeLibCallOptions CallOptions;
+ std::pair<SDValue, SDValue> Tmp =
+ TLI.makeLibCall(DAG, LC, VT, Op, CallOptions, SDLoc(Node), Chain);
+ Results.push_back(Tmp.first);
+ if (IsStrict)
+ Results.push_back(Tmp.second);
+ break;
+ }
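The TRUNC-flag rule above in scalar terms: FP_ROUND(Y, 1) promises the
narrowing is value-preserving, so only the TRUNC == 0 form may be sent to the
rounding libcall. A small model of that invariant (helper hypothetical):

    #include <cassert>

    // TRUNC=1 asserts the round-trip is exact; the libcall path handles
    // only the genuinely rounding TRUNC=0 form.
    static float fp_round_exact(double y) {
      float f = (float)y;
      assert((double)f == y && "TRUNC=1 requires a value-preserving round");
      return f;
    }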
+ case ISD::FP_EXTEND: {
+ Results.push_back(
+ ExpandLibCall(RTLIB::getFPEXT(Node->getOperand(0).getValueType(),
+ Node->getValueType(0)),
+ Node, false));
+ break;
+ }
+ case ISD::STRICT_FP_EXTEND:
case ISD::STRICT_FP_TO_FP16: {
RTLIB::Libcall LC =
- Node->getOpcode() == ISD::STRICT_FP_TO_FP16
- ? RTLIB::getFPROUND(Node->getOperand(1).getValueType(), MVT::f16)
- : RTLIB::getFPEXT(Node->getOperand(1).getValueType(),
- Node->getValueType(0));
- assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
-
+ Node->getOpcode() == ISD::STRICT_FP_TO_FP16
+ ? RTLIB::getFPROUND(Node->getOperand(1).getValueType(), MVT::f16)
+ : RTLIB::getFPEXT(Node->getOperand(1).getValueType(),
+ Node->getValueType(0));
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to legalize as libcall");
+
TargetLowering::MakeLibCallOptions CallOptions;
std::pair<SDValue, SDValue> Tmp =
TLI.makeLibCall(DAG, LC, Node->getValueType(0), Node->getOperand(1),
@@ -4630,9 +4630,9 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
OVT = Node->getOperand(0).getSimpleValueType();
}
if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP ||
- Node->getOpcode() == ISD::STRICT_SINT_TO_FP ||
- Node->getOpcode() == ISD::STRICT_FSETCC ||
- Node->getOpcode() == ISD::STRICT_FSETCCS)
+ Node->getOpcode() == ISD::STRICT_SINT_TO_FP ||
+ Node->getOpcode() == ISD::STRICT_FSETCC ||
+ Node->getOpcode() == ISD::STRICT_FSETCCS)
OVT = Node->getOperand(1).getSimpleValueType();
if (Node->getOpcode() == ISD::BR_CC)
OVT = Node->getOperand(2).getSimpleValueType();
@@ -4692,10 +4692,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
case ISD::STRICT_FP_TO_SINT:
PromoteLegalFP_TO_INT(Node, dl, Results);
break;
- case ISD::FP_TO_UINT_SAT:
- case ISD::FP_TO_SINT_SAT:
- Results.push_back(PromoteLegalFP_TO_INT_SAT(Node, dl));
- break;
+ case ISD::FP_TO_UINT_SAT:
+ case ISD::FP_TO_SINT_SAT:
+ Results.push_back(PromoteLegalFP_TO_INT_SAT(Node, dl));
+ break;
case ISD::UINT_TO_FP:
case ISD::STRICT_UINT_TO_FP:
case ISD::SINT_TO_FP:
@@ -4830,29 +4830,29 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Results.push_back(Tmp1);
break;
}
- case ISD::SETCC:
- case ISD::STRICT_FSETCC:
- case ISD::STRICT_FSETCCS: {
+ case ISD::SETCC:
+ case ISD::STRICT_FSETCC:
+ case ISD::STRICT_FSETCCS: {
unsigned ExtOp = ISD::FP_EXTEND;
if (NVT.isInteger()) {
- ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
+ ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
}
- if (Node->isStrictFPOpcode()) {
- SDValue InChain = Node->getOperand(0);
- std::tie(Tmp1, std::ignore) =
- DAG.getStrictFPExtendOrRound(Node->getOperand(1), InChain, dl, NVT);
- std::tie(Tmp2, std::ignore) =
- DAG.getStrictFPExtendOrRound(Node->getOperand(2), InChain, dl, NVT);
- SmallVector<SDValue, 2> TmpChains = {Tmp1.getValue(1), Tmp2.getValue(1)};
- SDValue OutChain = DAG.getTokenFactor(dl, TmpChains);
- SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
- Results.push_back(DAG.getNode(Node->getOpcode(), dl, VTs,
- {OutChain, Tmp1, Tmp2, Node->getOperand(3)},
- Node->getFlags()));
- Results.push_back(Results.back().getValue(1));
- break;
- }
+ if (Node->isStrictFPOpcode()) {
+ SDValue InChain = Node->getOperand(0);
+ std::tie(Tmp1, std::ignore) =
+ DAG.getStrictFPExtendOrRound(Node->getOperand(1), InChain, dl, NVT);
+ std::tie(Tmp2, std::ignore) =
+ DAG.getStrictFPExtendOrRound(Node->getOperand(2), InChain, dl, NVT);
+ SmallVector<SDValue, 2> TmpChains = {Tmp1.getValue(1), Tmp2.getValue(1)};
+ SDValue OutChain = DAG.getTokenFactor(dl, TmpChains);
+ SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
+ Results.push_back(DAG.getNode(Node->getOpcode(), dl, VTs,
+ {OutChain, Tmp1, Tmp2, Node->getOperand(3)},
+ Node->getFlags()));
+ Results.push_back(Results.back().getValue(1));
+ break;
+ }
Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), Tmp1,
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 966645e325..a3820fd211 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -134,16 +134,16 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::UINT_TO_FP: R = SoftenFloatRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = SoftenFloatRes_UNDEF(N); break;
case ISD::VAARG: R = SoftenFloatRes_VAARG(N); break;
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_FMIN:
- case ISD::VECREDUCE_FMAX:
- R = SoftenFloatRes_VECREDUCE(N);
- break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- R = SoftenFloatRes_VECREDUCE_SEQ(N);
- break;
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_FMIN:
+ case ISD::VECREDUCE_FMAX:
+ R = SoftenFloatRes_VECREDUCE(N);
+ break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ R = SoftenFloatRes_VECREDUCE_SEQ(N);
+ break;
}
// If R is null, the sub-method took care of registering the result.
@@ -782,17 +782,17 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
return Tmp.first;
}
-SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE(SDNode *N) {
- // Expand and soften recursively.
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
- return SDValue();
-}
-
-SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE_SEQ(SDNode *N) {
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
- return SDValue();
-}
+SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE(SDNode *N) {
+ // Expand and soften recursively.
+ ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
+ return SDValue();
+}
+
+SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE_SEQ(SDNode *N) {
+  ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+  return SDValue();
+}
//===----------------------------------------------------------------------===//
// Convert Float Operand to Integer
//===----------------------------------------------------------------------===//
@@ -819,9 +819,9 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_XINT(N); break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = SoftenFloatOp_FP_TO_XINT_SAT(N); break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = SoftenFloatOp_FP_TO_XINT_SAT(N); break;
case ISD::STRICT_LROUND:
case ISD::LROUND: Res = SoftenFloatOp_LROUND(N); break;
case ISD::STRICT_LLROUND:
@@ -913,24 +913,24 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
0);
}
-// Even if the result type is legal, no libcall may exactly match (e.g. we
-// don't have fp -> i8 conversions). This helper method looks for an
-// appropriate promoted libcall.
-static RTLIB::Libcall findFPToIntLibcall(EVT SrcVT, EVT RetVT, EVT &Promoted,
- bool Signed) {
- RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
- for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
- IntVT <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
- ++IntVT) {
- Promoted = (MVT::SimpleValueType)IntVT;
-    // The type needs to be big enough to hold the result.
- if (Promoted.bitsGE(RetVT))
- LC = Signed ? RTLIB::getFPTOSINT(SrcVT, Promoted)
- : RTLIB::getFPTOUINT(SrcVT, Promoted);
- }
- return LC;
-}
-
+// Even if the result type is legal, no libcall may exactly match (e.g. we
+// don't have fp -> i8 conversions). This helper method looks for an
+// appropriate promoted libcall.
+static RTLIB::Libcall findFPToIntLibcall(EVT SrcVT, EVT RetVT, EVT &Promoted,
+ bool Signed) {
+ RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
+ IntVT <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL;
+ ++IntVT) {
+ Promoted = (MVT::SimpleValueType)IntVT;
+    // The type needs to be big enough to hold the result.
+ if (Promoted.bitsGE(RetVT))
+ LC = Signed ? RTLIB::getFPTOSINT(SrcVT, Promoted)
+ : RTLIB::getFPTOUINT(SrcVT, Promoted);
+ }
+ return LC;
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT(SDNode *N) {
bool IsStrict = N->isStrictFPOpcode();
bool Signed = N->getOpcode() == ISD::FP_TO_SINT ||
@@ -946,9 +946,9 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT(SDNode *N) {
// a larger type, eg: fp -> i32. Even if it is legal, no libcall may exactly
// match, eg. we don't have fp -> i8 conversions.
// Look for an appropriate libcall.
- RTLIB::Libcall LC = findFPToIntLibcall(SVT, RVT, NVT, Signed);
- assert(LC != RTLIB::UNKNOWN_LIBCALL && NVT.isSimple() &&
- "Unsupported FP_TO_XINT!");
+ RTLIB::Libcall LC = findFPToIntLibcall(SVT, RVT, NVT, Signed);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && NVT.isSimple() &&
+ "Unsupported FP_TO_XINT!");
Op = GetSoftenedFloat(Op);
SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
@@ -968,11 +968,11 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT(SDNode *N) {
return SDValue();
}
-SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT_SAT(SDNode *N) {
- SDValue Res = TLI.expandFP_TO_INT_SAT(N, DAG);
- return Res;
-}
-
+SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_XINT_SAT(SDNode *N) {
+ SDValue Res = TLI.expandFP_TO_INT_SAT(N, DAG);
+ return Res;
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
@@ -1239,8 +1239,8 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
case ISD::STRICT_FTRUNC:
case ISD::FTRUNC: ExpandFloatRes_FTRUNC(N, Lo, Hi); break;
case ISD::LOAD: ExpandFloatRes_LOAD(N, Lo, Hi); break;
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break;
case ISD::STRICT_FREM:
@@ -1313,7 +1313,7 @@ void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandFloatRes_FMINNUM(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0),
+ ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0),
RTLIB::FMIN_F32, RTLIB::FMIN_F64,
RTLIB::FMIN_F80, RTLIB::FMIN_F128,
RTLIB::FMIN_PPCF128), Lo, Hi);
@@ -1639,18 +1639,18 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!");
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- bool Strict = N->isStrictFPOpcode();
- SDValue Src = N->getOperand(Strict ? 1 : 0);
+ bool Strict = N->isStrictFPOpcode();
+ SDValue Src = N->getOperand(Strict ? 1 : 0);
EVT SrcVT = Src.getValueType();
- bool isSigned = N->getOpcode() == ISD::SINT_TO_FP ||
- N->getOpcode() == ISD::STRICT_SINT_TO_FP;
+ bool isSigned = N->getOpcode() == ISD::SINT_TO_FP ||
+ N->getOpcode() == ISD::STRICT_SINT_TO_FP;
SDLoc dl(N);
- SDValue Chain = Strict ? N->getOperand(0) : DAG.getEntryNode();
-
- // TODO: Any other flags to propagate?
- SDNodeFlags Flags;
- Flags.setNoFPExcept(N->getFlags().hasNoFPExcept());
+  SDValue Chain = Strict ? N->getOperand(0) : DAG.getEntryNode();
+
+  // TODO: Any other flags to propagate?
+  SDNodeFlags Flags;
+  Flags.setNoFPExcept(N->getFlags().hasNoFPExcept());
// First do an SINT_TO_FP, whether the original was signed or unsigned.
// When promoting partial word types to i32 we must honor the signedness,
// though.
@@ -1658,12 +1658,12 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
// The integer can be represented exactly in an f64.
Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT),
APInt(NVT.getSizeInBits(), 0)), dl, NVT);
- if (Strict) {
- Hi = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other),
- {Chain, Src}, Flags);
- Chain = Hi.getValue(1);
- } else
- Hi = DAG.getNode(N->getOpcode(), dl, NVT, Src);
+ if (Strict) {
+ Hi = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other),
+ {Chain, Src}, Flags);
+ Chain = Hi.getValue(1);
+ } else
+ Hi = DAG.getNode(N->getOpcode(), dl, NVT, Src);
} else {
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (SrcVT.bitsLE(MVT::i64)) {
@@ -1678,25 +1678,25 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
TargetLowering::MakeLibCallOptions CallOptions;
CallOptions.setSExt(true);
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, VT, Src, CallOptions, dl, Chain);
- if (Strict)
- Chain = Tmp.second;
- GetPairElements(Tmp.first, Lo, Hi);
+ std::pair<SDValue, SDValue> Tmp =
+ TLI.makeLibCall(DAG, LC, VT, Src, CallOptions, dl, Chain);
+ if (Strict)
+ Chain = Tmp.second;
+ GetPairElements(Tmp.first, Lo, Hi);
}
- // No need to complement for unsigned 32-bit integers
- if (isSigned || SrcVT.bitsLE(MVT::i32)) {
- if (Strict)
- ReplaceValueWith(SDValue(N, 1), Chain);
-
+ // No need to complement for unsigned 32-bit integers
+ if (isSigned || SrcVT.bitsLE(MVT::i32)) {
+ if (Strict)
+ ReplaceValueWith(SDValue(N, 1), Chain);
+
return;
- }
+ }
// Unsigned - fix up the SINT_TO_FP value just calculated.
- // FIXME: For unsigned i128 to ppc_fp128 conversion, we need to carefully
- // keep semantics correctness if the integer is not exactly representable
- // here. See ExpandLegalINT_TO_FP.
+ // FIXME: For unsigned i128 to ppc_fp128 conversion, we need to carefully
+ // keep semantics correctness if the integer is not exactly representable
+ // here. See ExpandLegalINT_TO_FP.
Hi = DAG.getNode(ISD::BUILD_PAIR, dl, VT, Lo, Hi);
SrcVT = Src.getValueType();
@@ -1720,16 +1720,16 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
break;
}
- // TODO: Are there other fast-math-flags to propagate to this FADD?
- SDValue NewLo = DAG.getConstantFP(
- APFloat(APFloat::PPCDoubleDouble(), APInt(128, Parts)), dl, MVT::ppcf128);
- if (Strict) {
- Lo = DAG.getNode(ISD::STRICT_FADD, dl, DAG.getVTList(VT, MVT::Other),
- {Chain, Hi, NewLo}, Flags);
- Chain = Lo.getValue(1);
- ReplaceValueWith(SDValue(N, 1), Chain);
- } else
- Lo = DAG.getNode(ISD::FADD, dl, VT, Hi, NewLo);
+ // TODO: Are there other fast-math-flags to propagate to this FADD?
+ SDValue NewLo = DAG.getConstantFP(
+ APFloat(APFloat::PPCDoubleDouble(), APInt(128, Parts)), dl, MVT::ppcf128);
+ if (Strict) {
+ Lo = DAG.getNode(ISD::STRICT_FADD, dl, DAG.getVTList(VT, MVT::Other),
+ {Chain, Hi, NewLo}, Flags);
+ Chain = Lo.getValue(1);
+ ReplaceValueWith(SDValue(N, 1), Chain);
+ } else
+ Lo = DAG.getNode(ISD::FADD, dl, VT, Hi, NewLo);
Lo = DAG.getSelectCC(dl, Src, DAG.getConstant(0, dl, SrcVT),
Lo, Hi, ISD::SETLT);
GetPairElements(Lo, Lo, Hi);
@@ -1770,15 +1770,15 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break;
case ISD::STRICT_FP_TO_SINT:
case ISD::STRICT_FP_TO_UINT:
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_XINT(N); break;
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_XINT(N); break;
case ISD::LROUND: Res = ExpandFloatOp_LROUND(N); break;
case ISD::LLROUND: Res = ExpandFloatOp_LLROUND(N); break;
case ISD::LRINT: Res = ExpandFloatOp_LRINT(N); break;
case ISD::LLRINT: Res = ExpandFloatOp_LLRINT(N); break;
case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break;
- case ISD::STRICT_FSETCC:
- case ISD::STRICT_FSETCCS:
+ case ISD::STRICT_FSETCC:
+ case ISD::STRICT_FSETCCS:
case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break;
case ISD::STORE: Res = ExpandFloatOp_STORE(cast<StoreSDNode>(N),
OpNo); break;
@@ -1804,8 +1804,8 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
SDValue &NewRHS,
ISD::CondCode &CCCode,
- const SDLoc &dl, SDValue &Chain,
- bool IsSignaling) {
+ const SDLoc &dl, SDValue &Chain,
+ bool IsSignaling) {
SDValue LHSLo, LHSHi, RHSLo, RHSHi;
GetExpandedFloat(NewLHS, LHSLo, LHSHi);
GetExpandedFloat(NewRHS, RHSLo, RHSHi);
@@ -1817,32 +1817,32 @@ void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
// BNE crN, L:
// FCMPU crN, lo1, lo2
// The following can be improved, but not that much.
- SDValue Tmp1, Tmp2, Tmp3, OutputChain;
- Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi,
- RHSHi, ISD::SETOEQ, Chain, IsSignaling);
- OutputChain = Tmp1->getNumValues() > 1 ? Tmp1.getValue(1) : SDValue();
- Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSLo.getValueType()), LHSLo,
- RHSLo, CCCode, OutputChain, IsSignaling);
- OutputChain = Tmp2->getNumValues() > 1 ? Tmp2.getValue(1) : SDValue();
+ SDValue Tmp1, Tmp2, Tmp3, OutputChain;
+ Tmp1 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi,
+ RHSHi, ISD::SETOEQ, Chain, IsSignaling);
+ OutputChain = Tmp1->getNumValues() > 1 ? Tmp1.getValue(1) : SDValue();
+ Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSLo.getValueType()), LHSLo,
+ RHSLo, CCCode, OutputChain, IsSignaling);
+ OutputChain = Tmp2->getNumValues() > 1 ? Tmp2.getValue(1) : SDValue();
Tmp3 = DAG.getNode(ISD::AND, dl, Tmp1.getValueType(), Tmp1, Tmp2);
- Tmp1 =
- DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi, RHSHi,
- ISD::SETUNE, OutputChain, IsSignaling);
- OutputChain = Tmp1->getNumValues() > 1 ? Tmp1.getValue(1) : SDValue();
- Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi,
- RHSHi, CCCode, OutputChain, IsSignaling);
- OutputChain = Tmp2->getNumValues() > 1 ? Tmp2.getValue(1) : SDValue();
+ Tmp1 =
+ DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi, RHSHi,
+ ISD::SETUNE, OutputChain, IsSignaling);
+ OutputChain = Tmp1->getNumValues() > 1 ? Tmp1.getValue(1) : SDValue();
+ Tmp2 = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()), LHSHi,
+ RHSHi, CCCode, OutputChain, IsSignaling);
+ OutputChain = Tmp2->getNumValues() > 1 ? Tmp2.getValue(1) : SDValue();
Tmp1 = DAG.getNode(ISD::AND, dl, Tmp1.getValueType(), Tmp1, Tmp2);
NewLHS = DAG.getNode(ISD::OR, dl, Tmp1.getValueType(), Tmp1, Tmp3);
NewRHS = SDValue(); // LHS is the result, not a compare.
- Chain = OutputChain;
+ Chain = OutputChain;
}
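The node sequence above evaluates a ppc_fp128 compare on its hi/lo double
halves, per the PowerPC sequence in the comment. As a plain predicate
(ordered less-than shown; the real code uses SETOEQ/SETUNE and threads chains
for strict ops):

    // If the high halves are equal, the low halves decide; otherwise the
    // high halves decide. This is the
    // (hi==hi && lo CC lo) || (hi!=hi && hi CC hi) shape built above.
    static bool ppcf128_olt(double hi1, double lo1, double hi2, double lo2) {
      return (hi1 == hi2 && lo1 < lo2) || (hi1 != hi2 && hi1 < hi2);
    }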
SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
- SDValue Chain;
- FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain);
+ SDValue Chain;
+ FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain);
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
@@ -1897,23 +1897,23 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
return SDValue();
}
-SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_XINT(SDNode *N) {
+SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_XINT(SDNode *N) {
EVT RVT = N->getValueType(0);
SDLoc dl(N);
bool IsStrict = N->isStrictFPOpcode();
- bool Signed = N->getOpcode() == ISD::FP_TO_SINT ||
- N->getOpcode() == ISD::STRICT_FP_TO_SINT;
+ bool Signed = N->getOpcode() == ISD::FP_TO_SINT ||
+ N->getOpcode() == ISD::STRICT_FP_TO_SINT;
SDValue Op = N->getOperand(IsStrict ? 1 : 0);
SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
- EVT NVT;
- RTLIB::Libcall LC = findFPToIntLibcall(Op.getValueType(), RVT, NVT, Signed);
- assert(LC != RTLIB::UNKNOWN_LIBCALL && NVT.isSimple() &&
- "Unsupported FP_TO_XINT!");
+ EVT NVT;
+ RTLIB::Libcall LC = findFPToIntLibcall(Op.getValueType(), RVT, NVT, Signed);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL && NVT.isSimple() &&
+ "Unsupported FP_TO_XINT!");
TargetLowering::MakeLibCallOptions CallOptions;
- std::pair<SDValue, SDValue> Tmp =
- TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, dl, Chain);
+ std::pair<SDValue, SDValue> Tmp =
+ TLI.makeLibCall(DAG, LC, NVT, Op, CallOptions, dl, Chain);
if (!IsStrict)
return Tmp.first;
@@ -1925,8 +1925,8 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_XINT(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
- SDValue Chain;
- FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain);
+ SDValue Chain;
+ FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain);
// If ExpandSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
@@ -1942,25 +1942,25 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
- bool IsStrict = N->isStrictFPOpcode();
- SDValue NewLHS = N->getOperand(IsStrict ? 1 : 0);
- SDValue NewRHS = N->getOperand(IsStrict ? 2 : 1);
- SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
- ISD::CondCode CCCode =
- cast<CondCodeSDNode>(N->getOperand(IsStrict ? 3 : 2))->get();
- FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain,
- N->getOpcode() == ISD::STRICT_FSETCCS);
-
- // FloatExpandSetCCOperands always returned a scalar.
- assert(!NewRHS.getNode() && "Expect to return scalar");
- assert(NewLHS.getValueType() == N->getValueType(0) &&
- "Unexpected setcc expansion!");
- if (Chain) {
- ReplaceValueWith(SDValue(N, 0), NewLHS);
- ReplaceValueWith(SDValue(N, 1), Chain);
- return SDValue();
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue NewLHS = N->getOperand(IsStrict ? 1 : 0);
+ SDValue NewRHS = N->getOperand(IsStrict ? 2 : 1);
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ ISD::CondCode CCCode =
+ cast<CondCodeSDNode>(N->getOperand(IsStrict ? 3 : 2))->get();
+ FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode, SDLoc(N), Chain,
+ N->getOpcode() == ISD::STRICT_FSETCCS);
+
+ // FloatExpandSetCCOperands always returned a scalar.
+ assert(!NewRHS.getNode() && "Expect to return scalar");
+ assert(NewLHS.getValueType() == N->getValueType(0) &&
+ "Unexpected setcc expansion!");
+ if (Chain) {
+ ReplaceValueWith(SDValue(N, 0), NewLHS);
+ ReplaceValueWith(SDValue(N, 1), Chain);
+ return SDValue();
}
- return NewLHS;
+ return NewLHS;
}
SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
@@ -2081,9 +2081,9 @@ bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::FCOPYSIGN: R = PromoteFloatOp_FCOPYSIGN(N, OpNo); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: R = PromoteFloatOp_FP_TO_XINT(N, OpNo); break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- R = PromoteFloatOp_FP_TO_XINT_SAT(N, OpNo); break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ R = PromoteFloatOp_FP_TO_XINT_SAT(N, OpNo); break;
case ISD::FP_EXTEND: R = PromoteFloatOp_FP_EXTEND(N, OpNo); break;
case ISD::SELECT_CC: R = PromoteFloatOp_SELECT_CC(N, OpNo); break;
case ISD::SETCC: R = PromoteFloatOp_SETCC(N, OpNo); break;
@@ -2127,13 +2127,13 @@ SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo) {
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op);
}
-SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N,
- unsigned OpNo) {
- SDValue Op = GetPromotedFloat(N->getOperand(0));
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op,
- N->getOperand(1));
-}
-
+SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N,
+ unsigned OpNo) {
+ SDValue Op = GetPromotedFloat(N->getOperand(0));
+ return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op,
+ N->getOperand(1));
+}
+
SDValue DAGTypeLegalizer::PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo) {
SDValue Op = GetPromotedFloat(N->getOperand(0));
EVT VT = N->getValueType(0);
@@ -2269,16 +2269,16 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
case ISD::UINT_TO_FP: R = PromoteFloatRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = PromoteFloatRes_UNDEF(N); break;
case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_FMIN:
- case ISD::VECREDUCE_FMAX:
- R = PromoteFloatRes_VECREDUCE(N);
- break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- R = PromoteFloatRes_VECREDUCE_SEQ(N);
- break;
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_FMIN:
+ case ISD::VECREDUCE_FMAX:
+ R = PromoteFloatRes_VECREDUCE(N);
+ break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ R = PromoteFloatRes_VECREDUCE_SEQ(N);
+ break;
}
if (R.getNode())
@@ -2510,20 +2510,20 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_UNDEF(SDNode *N) {
N->getValueType(0)));
}
-SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE(SDNode *N) {
- // Expand and promote recursively.
- // TODO: This is non-optimal, but dealing with the concurrently happening
- // vector-legalization is non-trivial. We could do something similar to
- // PromoteFloatRes_EXTRACT_VECTOR_ELT here.
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
- return SDValue();
-}
-
-SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE_SEQ(SDNode *N) {
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
- return SDValue();
-}
-
+SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE(SDNode *N) {
+ // Expand and promote recursively.
+ // TODO: This is non-optimal, but dealing with the concurrently happening
+ // vector-legalization is non-trivial. We could do something similar to
+ // PromoteFloatRes_EXTRACT_VECTOR_ELT here.
+ ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
+ return SDValue();
+}
+
+SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE_SEQ(SDNode *N) {
+ ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+ return SDValue();
+}
+
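// Editor's note (illustrative sketch, not part of the patch): what the two
// expansions above lower to, shown on a plain array. VECREDUCE_FADD may
// reassociate (e.g. a pairwise tree), while VECREDUCE_SEQ_FADD must fold
// strictly left-to-right from the explicit accumulator operand. Function
// names here are hypothetical:
#include <cstddef>

static float reduceSeqFAdd(float Acc, const float *V, size_t N) {
  for (size_t I = 0; I != N; ++I)
    Acc += V[I];             // strictly in order: VECREDUCE_SEQ_FADD semantics
  return Acc;
}

static float reduceFAddTree(const float *V, size_t N) {
  if (N == 1)
    return V[0];
  size_t Half = N / 2;       // pairwise tree: one legal VECREDUCE_FADD order
  return reduceFAddTree(V, Half) + reduceFAddTree(V + Half, N - Half);
}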
SDValue DAGTypeLegalizer::BitcastToInt_ATOMIC_SWAP(SDNode *N) {
EVT VT = N->getValueType(0);
@@ -2632,16 +2632,16 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::UINT_TO_FP: R = SoftPromoteHalfRes_XINT_TO_FP(N); break;
case ISD::UNDEF: R = SoftPromoteHalfRes_UNDEF(N); break;
case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_FMIN:
- case ISD::VECREDUCE_FMAX:
- R = SoftPromoteHalfRes_VECREDUCE(N);
- break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- R = SoftPromoteHalfRes_VECREDUCE_SEQ(N);
- break;
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_FMIN:
+ case ISD::VECREDUCE_FMAX:
+ R = SoftPromoteHalfRes_VECREDUCE(N);
+ break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ R = SoftPromoteHalfRes_VECREDUCE_SEQ(N);
+ break;
}
if (R.getNode())
@@ -2834,18 +2834,18 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_BinOp(SDNode *N) {
return DAG.getNode(ISD::FP_TO_FP16, dl, MVT::i16, Res);
}
-SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE(SDNode *N) {
- // Expand and soften recursively.
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
- return SDValue();
-}
-
-SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N) {
- // Expand and soften.
- ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
- return SDValue();
-}
-
+SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE(SDNode *N) {
+ // Expand and soften recursively.
+ ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduce(N, DAG));
+ return SDValue();
+}
+
+SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N) {
+ // Expand and soften.
+ ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+ return SDValue();
+}
+
//===----------------------------------------------------------------------===//
// Half Operand Soft Promotion
//===----------------------------------------------------------------------===//
@@ -2877,9 +2877,9 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
case ISD::FCOPYSIGN: Res = SoftPromoteHalfOp_FCOPYSIGN(N, OpNo); break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = SoftPromoteHalfOp_FP_TO_XINT(N); break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = SoftPromoteHalfOp_FP_TO_XINT_SAT(N); break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = SoftPromoteHalfOp_FP_TO_XINT_SAT(N); break;
case ISD::STRICT_FP_EXTEND:
case ISD::FP_EXTEND: Res = SoftPromoteHalfOp_FP_EXTEND(N); break;
case ISD::SELECT_CC: Res = SoftPromoteHalfOp_SELECT_CC(N, OpNo); break;
@@ -2949,20 +2949,20 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_TO_XINT(SDNode *N) {
return DAG.getNode(N->getOpcode(), dl, N->getValueType(0), Res);
}
-SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_TO_XINT_SAT(SDNode *N) {
- SDValue Op = N->getOperand(0);
- SDLoc dl(N);
-
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType());
-
- Op = GetSoftPromotedHalf(Op);
-
- SDValue Res = DAG.getNode(ISD::FP16_TO_FP, dl, NVT, Op);
-
- return DAG.getNode(N->getOpcode(), dl, N->getValueType(0), Res,
- N->getOperand(1));
-}
-
+SDValue DAGTypeLegalizer::SoftPromoteHalfOp_FP_TO_XINT_SAT(SDNode *N) {
+ SDValue Op = N->getOperand(0);
+ SDLoc dl(N);
+
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType());
+
+ Op = GetSoftPromotedHalf(Op);
+
+ SDValue Res = DAG.getNode(ISD::FP16_TO_FP, dl, NVT, Op);
+
+ return DAG.getNode(N->getOpcode(), dl, N->getValueType(0), Res,
+ N->getOperand(1));
+}
+
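// Editor's note (illustrative sketch, not part of the patch): the half input
// is first widened (the FP16_TO_FP above); the saturating conversion then
// clamps to the target integer range and sends NaN to zero. The same
// semantics by hand for a float -> i8 FP_TO_SINT_SAT (fpToSIntSat8 is a
// hypothetical name; IEEE binary32 float assumed):
#include <cmath>
#include <cstdint>

static int8_t fpToSIntSat8(float X) {
  if (std::isnan(X))
    return 0;                          // NaN saturates to zero
  if (X <= -128.0f)
    return INT8_MIN;                   // clamp below the i8 range
  if (X >= 127.0f)
    return INT8_MAX;                   // clamp above the i8 range
  return static_cast<int8_t>(X);       // in range: ordinary truncation
}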
SDValue DAGTypeLegalizer::SoftPromoteHalfOp_SELECT_CC(SDNode *N,
unsigned OpNo) {
assert(OpNo == 0 && "Can only soften the comparison values");
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 4a686bc227..d9ccd1db85 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -62,8 +62,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: Res = PromoteIntRes_CTLZ(N); break;
- case ISD::PARITY:
- case ISD::CTPOP: Res = PromoteIntRes_CTPOP_PARITY(N); break;
+ case ISD::PARITY:
+ case ISD::CTPOP: Res = PromoteIntRes_CTPOP_PARITY(N); break;
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: Res = PromoteIntRes_CTTZ(N); break;
case ISD::EXTRACT_VECTOR_ELT:
@@ -82,7 +82,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SMIN:
case ISD::SMAX: Res = PromoteIntRes_SExtIntBinOp(N); break;
case ISD::UMIN:
- case ISD::UMAX: Res = PromoteIntRes_UMINUMAX(N); break;
+ case ISD::UMAX: Res = PromoteIntRes_UMINUMAX(N); break;
case ISD::SHL: Res = PromoteIntRes_SHL(N); break;
case ISD::SIGN_EXTEND_INREG:
@@ -123,10 +123,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = PromoteIntRes_FP_TO_XINT(N); break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = PromoteIntRes_FP_TO_XINT_SAT(N); break;
-
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = PromoteIntRes_FP_TO_XINT_SAT(N); break;
+
case ISD::FP_TO_FP16: Res = PromoteIntRes_FP_TO_FP16(N); break;
case ISD::FLT_ROUNDS_: Res = PromoteIntRes_FLT_ROUNDS(N); break;
@@ -156,15 +156,15 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::ADDCARRY:
case ISD::SUBCARRY: Res = PromoteIntRes_ADDSUBCARRY(N, ResNo); break;
- case ISD::SADDO_CARRY:
- case ISD::SSUBO_CARRY: Res = PromoteIntRes_SADDSUBO_CARRY(N, ResNo); break;
-
+ case ISD::SADDO_CARRY:
+ case ISD::SSUBO_CARRY: Res = PromoteIntRes_SADDSUBO_CARRY(N, ResNo); break;
+
case ISD::SADDSAT:
case ISD::UADDSAT:
case ISD::SSUBSAT:
- case ISD::USUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT: Res = PromoteIntRes_ADDSUBSHLSAT(N); break;
+ case ISD::USUBSAT:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT: Res = PromoteIntRes_ADDSUBSHLSAT(N); break;
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
@@ -215,16 +215,16 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FREEZE:
Res = PromoteIntRes_FREEZE(N);
break;
-
- case ISD::ROTL:
- case ISD::ROTR:
- Res = PromoteIntRes_Rotate(N);
- break;
-
- case ISD::FSHL:
- case ISD::FSHR:
- Res = PromoteIntRes_FunnelShift(N);
- break;
+
+ case ISD::ROTL:
+ case ISD::ROTR:
+ Res = PromoteIntRes_Rotate(N);
+ break;
+
+ case ISD::FSHL:
+ case ISD::FSHR:
+ Res = PromoteIntRes_FunnelShift(N);
+ break;
}
// If the result is null then the sub-method took care of registering it.
@@ -511,10 +511,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
NVT));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP_PARITY(SDNode *N) {
- // Zero extend to the promoted type and do the count or parity there.
+SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP_PARITY(SDNode *N) {
+ // Zero extend to the promoted type and do the count or parity there.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
- return DAG.getNode(N->getOpcode(), SDLoc(N), Op.getValueType(), Op);
+ return DAG.getNode(N->getOpcode(), SDLoc(N), Op.getValueType(), Op);
}
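// Editor's note (illustrative sketch, not part of the patch): why the
// operand is zero-extended above. Zero extension adds only clear bits, so
// both popcount and parity are unchanged; sign extension could add up to 24
// set bits for an i8. Quick check using GCC/Clang builtins:
#include <cassert>
#include <cstdint>

int main() {
  uint8_t V = 0x80;                    // popcount 1, parity 1
  uint32_t Z = V;                      // zext: still exactly one set bit
  int32_t S = static_cast<int8_t>(V);  // sext: 0xFFFFFF80, 25 set bits
  assert(__builtin_popcount(Z) == 1);
  assert(__builtin_popcount(static_cast<uint32_t>(S)) == 25);
}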
SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
@@ -579,8 +579,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
SDValue Res;
if (N->isStrictFPOpcode()) {
- Res = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
- {N->getOperand(0), N->getOperand(1)});
+ Res = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
+ {N->getOperand(0), N->getOperand(1)});
// Legalize the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
@@ -600,14 +600,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
DAG.getValueType(N->getValueType(0).getScalarType()));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT_SAT(SDNode *N) {
- // Promote the result type, while keeping the original width in Op1.
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- SDLoc dl(N);
- return DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0),
- N->getOperand(1));
-}
-
+SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT_SAT(SDNode *N) {
+ // Promote the result type, while keeping the original width in Op1.
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDLoc dl(N);
+ return DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0),
+ N->getOperand(1));
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_FP16(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
@@ -691,17 +691,17 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MGATHER(MaskedGatherSDNode *N) {
assert(NVT == ExtPassThru.getValueType() &&
"Gather result type and the passThru argument type should be the same");
- ISD::LoadExtType ExtType = N->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- ExtType = ISD::EXTLOAD;
-
+ ISD::LoadExtType ExtType = N->getExtensionType();
+ if (ExtType == ISD::NON_EXTLOAD)
+ ExtType = ISD::EXTLOAD;
+
SDLoc dl(N);
SDValue Ops[] = {N->getChain(), ExtPassThru, N->getMask(), N->getBasePtr(),
N->getIndex(), N->getScale() };
SDValue Res = DAG.getMaskedGather(DAG.getVTList(NVT, MVT::Other),
N->getMemoryVT(), dl, Ops,
- N->getMemOperand(), N->getIndexType(),
- ExtType);
+ N->getMemOperand(), N->getIndexType(),
+ ExtType);
// Legalize the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
@@ -733,11 +733,11 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
return DAG.getBoolExtOrTrunc(Res.getValue(1), dl, NVT, VT);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
// If the promoted type is legal, we can convert this to:
// 1. ANY_EXTEND iN to iM
// 2. SHL by M-N
- // 3. [US][ADD|SUB|SHL]SAT
+ // 3. [US][ADD|SUB|SHL]SAT
// 4. L/ASHR by M-N
// Else it is more efficient to convert this to a min and a max
// operation in the higher precision arithmetic.
@@ -747,13 +747,13 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
unsigned OldBits = Op1.getScalarValueSizeInBits();
unsigned Opcode = N->getOpcode();
- bool IsShift = Opcode == ISD::USHLSAT || Opcode == ISD::SSHLSAT;
+ bool IsShift = Opcode == ISD::USHLSAT || Opcode == ISD::SSHLSAT;
SDValue Op1Promoted, Op2Promoted;
- if (IsShift) {
- Op1Promoted = GetPromotedInteger(Op1);
- Op2Promoted = ZExtPromotedInteger(Op2);
- } else if (Opcode == ISD::UADDSAT || Opcode == ISD::USUBSAT) {
+ if (IsShift) {
+ Op1Promoted = GetPromotedInteger(Op1);
+ Op2Promoted = ZExtPromotedInteger(Op2);
+ } else if (Opcode == ISD::UADDSAT || Opcode == ISD::USUBSAT) {
Op1Promoted = ZExtPromotedInteger(Op1);
Op2Promoted = ZExtPromotedInteger(Op2);
} else {
@@ -763,24 +763,24 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
EVT PromotedType = Op1Promoted.getValueType();
unsigned NewBits = PromotedType.getScalarSizeInBits();
- // Shift cannot use a min/max expansion, we can't detect overflow if all of
- // the bits have been shifted out.
- if (IsShift || TLI.isOperationLegalOrCustom(Opcode, PromotedType)) {
+ // Shift cannot use a min/max expansion, we can't detect overflow if all of
+ // the bits have been shifted out.
+ if (IsShift || TLI.isOperationLegalOrCustom(Opcode, PromotedType)) {
unsigned ShiftOp;
switch (Opcode) {
case ISD::SADDSAT:
case ISD::SSUBSAT:
- case ISD::SSHLSAT:
+ case ISD::SSHLSAT:
ShiftOp = ISD::SRA;
break;
case ISD::UADDSAT:
case ISD::USUBSAT:
- case ISD::USHLSAT:
+ case ISD::USHLSAT:
ShiftOp = ISD::SRL;
break;
default:
llvm_unreachable("Expected opcode to be signed or unsigned saturation "
- "addition, subtraction or left shift");
+ "addition, subtraction or left shift");
}
unsigned SHLAmount = NewBits - OldBits;
@@ -788,9 +788,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
SDValue ShiftAmount = DAG.getConstant(SHLAmount, dl, SHVT);
Op1Promoted =
DAG.getNode(ISD::SHL, dl, PromotedType, Op1Promoted, ShiftAmount);
- if (!IsShift)
- Op2Promoted =
- DAG.getNode(ISD::SHL, dl, PromotedType, Op2Promoted, ShiftAmount);
+ if (!IsShift)
+ Op2Promoted =
+ DAG.getNode(ISD::SHL, dl, PromotedType, Op2Promoted, ShiftAmount);
SDValue Result =
DAG.getNode(Opcode, dl, PromotedType, Op1Promoted, Op2Promoted);
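// Editor's note (illustrative sketch, not part of the patch): the
// extend + SHL(M-N) + saturate + shift-back pattern described in the comment
// above, done by hand for an i8 SADDSAT performed in i32. Shifting both
// inputs into the top byte makes the i32 saturation boundaries coincide with
// the i8 ones. Uses the GCC/Clang __builtin_add_overflow; arithmetic right
// shift of negatives assumed (guaranteed since C++20):
#include <cassert>
#include <cstdint>
#include <limits>

static int32_t saddSat32(int32_t A, int32_t B) {
  int32_t R;
  if (__builtin_add_overflow(A, B, &R))
    return A < 0 ? std::numeric_limits<int32_t>::min()
                 : std::numeric_limits<int32_t>::max();
  return R;
}

static int8_t saddSat8(int8_t A, int8_t B) {
  int32_t ShA = int32_t(A) * (1 << 24);  // SHL by M-N, written without UB
  int32_t ShB = int32_t(B) * (1 << 24);
  return static_cast<int8_t>(saddSat32(ShA, ShB) >> 24); // ASHR back down
}

int main() {
  assert(saddSat8(100, 100) == 127);     // saturates high
  assert(saddSat8(-100, -100) == -128);  // saturates low
  assert(saddSat8(5, -3) == 2);          // in range: plain add
}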
@@ -1118,15 +1118,15 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ZExtIntBinOp(SDNode *N) {
LHS.getValueType(), LHS, RHS);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_UMINUMAX(SDNode *N) {
- // It doesn't matter if we sign extend or zero extend in the inputs. So do
- // whatever is best for the target.
- SDValue LHS = SExtOrZExtPromotedInteger(N->getOperand(0));
- SDValue RHS = SExtOrZExtPromotedInteger(N->getOperand(1));
- return DAG.getNode(N->getOpcode(), SDLoc(N),
- LHS.getValueType(), LHS, RHS);
-}
-
+SDValue DAGTypeLegalizer::PromoteIntRes_UMINUMAX(SDNode *N) {
+ // It doesn't matter if we sign extend or zero extend in the inputs. So do
+ // whatever is best for the target.
+ SDValue LHS = SExtOrZExtPromotedInteger(N->getOperand(0));
+ SDValue RHS = SExtOrZExtPromotedInteger(N->getOperand(1));
+ return DAG.getNode(N->getOpcode(), SDLoc(N),
+ LHS.getValueType(), LHS, RHS);
+}
+
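// Editor's note (illustrative sketch, not part of the patch): why either
// extension works above. Both zext and sext are strictly monotonic maps from
// u8 into u32 when the result is compared as unsigned (sext sends [128,255]
// to the top of the unsigned range), so UMIN/UMAX of the extended values
// truncates back to the narrow answer. Exhaustive check:
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t Narrow = std::min<uint8_t>(A, B);
      uint32_t ZExt = std::min<uint32_t>(A, B);
      uint32_t SExt = std::min<uint32_t>(uint32_t(int32_t(int8_t(A))),
                                         uint32_t(int32_t(int8_t(B))));
      assert(uint8_t(ZExt) == Narrow && uint8_t(SExt) == Narrow);
    }
}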
SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
// The input value must be properly sign extended.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
@@ -1145,60 +1145,60 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
return DAG.getNode(ISD::SRL, SDLoc(N), LHS.getValueType(), LHS, RHS);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_Rotate(SDNode *N) {
- // Lower the rotate to shifts and ORs which can be promoted.
- SDValue Res;
- TLI.expandROT(N, true /*AllowVectorOps*/, Res, DAG);
- ReplaceValueWith(SDValue(N, 0), Res);
- return SDValue();
-}
-
-SDValue DAGTypeLegalizer::PromoteIntRes_FunnelShift(SDNode *N) {
- SDValue Hi = GetPromotedInteger(N->getOperand(0));
- SDValue Lo = GetPromotedInteger(N->getOperand(1));
- SDValue Amount = GetPromotedInteger(N->getOperand(2));
-
- SDLoc DL(N);
- EVT OldVT = N->getOperand(0).getValueType();
- EVT VT = Lo.getValueType();
- unsigned Opcode = N->getOpcode();
- bool IsFSHR = Opcode == ISD::FSHR;
- unsigned OldBits = OldVT.getScalarSizeInBits();
- unsigned NewBits = VT.getScalarSizeInBits();
-
- // Amount has to be interpreted modulo the old bit width.
- Amount =
- DAG.getNode(ISD::UREM, DL, VT, Amount, DAG.getConstant(OldBits, DL, VT));
-
- // If the promoted type is twice the size (or more), then we use the
- // traditional funnel 'double' shift codegen. This isn't necessary if the
- // shift amount is constant.
- // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z % bw)) >> bw.
- // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z % bw)).
- if (NewBits >= (2 * OldBits) && !isa<ConstantSDNode>(Amount) &&
- !TLI.isOperationLegalOrCustom(Opcode, VT)) {
- SDValue HiShift = DAG.getConstant(OldBits, DL, VT);
- Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, HiShift);
- Lo = DAG.getZeroExtendInReg(Lo, DL, OldVT);
- SDValue Res = DAG.getNode(ISD::OR, DL, VT, Hi, Lo);
- Res = DAG.getNode(IsFSHR ? ISD::SRL : ISD::SHL, DL, VT, Res, Amount);
- if (!IsFSHR)
- Res = DAG.getNode(ISD::SRL, DL, VT, Res, HiShift);
- return Res;
- }
-
- // Shift Lo up to occupy the upper bits of the promoted type.
- SDValue ShiftOffset = DAG.getConstant(NewBits - OldBits, DL, VT);
- Lo = DAG.getNode(ISD::SHL, DL, VT, Lo, ShiftOffset);
-
- // Increase Amount to shift the result into the lower bits of the promoted
- // type.
- if (IsFSHR)
- Amount = DAG.getNode(ISD::ADD, DL, VT, Amount, ShiftOffset);
-
- return DAG.getNode(Opcode, DL, VT, Hi, Lo, Amount);
-}
-
+SDValue DAGTypeLegalizer::PromoteIntRes_Rotate(SDNode *N) {
+ // Lower the rotate to shifts and ORs which can be promoted.
+ SDValue Res;
+ TLI.expandROT(N, true /*AllowVectorOps*/, Res, DAG);
+ ReplaceValueWith(SDValue(N, 0), Res);
+ return SDValue();
+}
+
+SDValue DAGTypeLegalizer::PromoteIntRes_FunnelShift(SDNode *N) {
+ SDValue Hi = GetPromotedInteger(N->getOperand(0));
+ SDValue Lo = GetPromotedInteger(N->getOperand(1));
+ SDValue Amount = GetPromotedInteger(N->getOperand(2));
+
+ SDLoc DL(N);
+ EVT OldVT = N->getOperand(0).getValueType();
+ EVT VT = Lo.getValueType();
+ unsigned Opcode = N->getOpcode();
+ bool IsFSHR = Opcode == ISD::FSHR;
+ unsigned OldBits = OldVT.getScalarSizeInBits();
+ unsigned NewBits = VT.getScalarSizeInBits();
+
+ // Amount has to be interpreted modulo the old bit width.
+ Amount =
+ DAG.getNode(ISD::UREM, DL, VT, Amount, DAG.getConstant(OldBits, DL, VT));
+
+ // If the promoted type is twice the size (or more), then we use the
+ // traditional funnel 'double' shift codegen. This isn't necessary if the
+ // shift amount is constant.
+ // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z % bw)) >> bw.
+ // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z % bw)).
+ if (NewBits >= (2 * OldBits) && !isa<ConstantSDNode>(Amount) &&
+ !TLI.isOperationLegalOrCustom(Opcode, VT)) {
+ SDValue HiShift = DAG.getConstant(OldBits, DL, VT);
+ Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, HiShift);
+ Lo = DAG.getZeroExtendInReg(Lo, DL, OldVT);
+ SDValue Res = DAG.getNode(ISD::OR, DL, VT, Hi, Lo);
+ Res = DAG.getNode(IsFSHR ? ISD::SRL : ISD::SHL, DL, VT, Res, Amount);
+ if (!IsFSHR)
+ Res = DAG.getNode(ISD::SRL, DL, VT, Res, HiShift);
+ return Res;
+ }
+
+ // Shift Lo up to occupy the upper bits of the promoted type.
+ SDValue ShiftOffset = DAG.getConstant(NewBits - OldBits, DL, VT);
+ Lo = DAG.getNode(ISD::SHL, DL, VT, Lo, ShiftOffset);
+
+ // Increase Amount to shift the result into the lower bits of the promoted
+ // type.
+ if (IsFSHR)
+ Amount = DAG.getNode(ISD::ADD, DL, VT, Amount, ShiftOffset);
+
+ return DAG.getNode(Opcode, DL, VT, Hi, Lo, Amount);
+}
+
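// Editor's note (illustrative sketch, not part of the patch): the 'double
// shift' codegen used above when the promoted type is at least twice as
// wide, written out for an i8 FSHL done in a 32-bit register: concatenate
// hi:lo, shift by z % bw, then take the top byte. fshl8 is a hypothetical
// name:
#include <cassert>
#include <cstdint>

static uint8_t fshl8(uint8_t Hi, uint8_t Lo, unsigned Z) {
  Z %= 8;                                   // amount is modulo the old width
  uint32_t Cat = (uint32_t(Hi) << 8) | Lo;  // (aext(Hi) << bw) | zext(Lo)
  return uint8_t((Cat << Z) >> 8);          // shift, then drop back down by bw
}

int main() {
  assert(fshl8(0x81, 0x40, 1) == 0x02);     // (Hi << 1) | (Lo >> 7)
  assert(fshl8(0xAB, 0xCD, 0) == 0xAB);     // z == 0 returns Hi unchanged
}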
SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res;
@@ -1286,7 +1286,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
}
// Handle promotion for the ADDE/SUBE/ADDCARRY/SUBCARRY nodes. Notice that
-// the third operand of ADDE/SUBE nodes is carry flag, which differs from
+// the third operand of ADDE/SUBE nodes is carry flag, which differs from
// the ADDCARRY/SUBCARRY nodes in that the third operand is carry Boolean.
SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo) {
if (ResNo == 1)
@@ -1317,12 +1317,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo) {
return SDValue(Res.getNode(), 0);
}
-SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO_CARRY(SDNode *N,
- unsigned ResNo) {
- assert(ResNo == 1 && "Don't know how to promote other results yet.");
- return PromoteIntRes_Overflow(N);
-}
-
+SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO_CARRY(SDNode *N,
+ unsigned ResNo) {
+ assert(ResNo == 1 && "Don't know how to promote other results yet.");
+ return PromoteIntRes_Overflow(N);
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_ABS(SDNode *N) {
SDValue Op0 = SExtPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::ABS, SDLoc(N), Op0.getValueType(), Op0);
@@ -1505,8 +1505,8 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::ROTL:
case ISD::ROTR: Res = PromoteIntOp_Shift(N); break;
- case ISD::SADDO_CARRY:
- case ISD::SSUBO_CARRY:
+ case ISD::SADDO_CARRY:
+ case ISD::SSUBO_CARRY:
case ISD::ADDCARRY:
case ISD::SUBCARRY: Res = PromoteIntOp_ADDSUBCARRY(N, OpNo); break;
@@ -1733,9 +1733,9 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
EVT OpTy = N->getOperand(1).getValueType();
if (N->getOpcode() == ISD::VSELECT)
- if (SDValue Res = WidenVSELECTMask(N))
- return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
- Res, N->getOperand(1), N->getOperand(2));
+ if (SDValue Res = WidenVSELECTMask(N))
+ return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
+ Res, N->getOperand(1), N->getOperand(2));
// Promote all the way up to the canonical SetCC type.
EVT OpVT = N->getOpcode() == ISD::SELECT ? OpTy.getScalarType() : OpTy;
@@ -1877,7 +1877,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_MGATHER(MaskedGatherSDNode *N,
SDValue DAGTypeLegalizer::PromoteIntOp_MSCATTER(MaskedScatterSDNode *N,
unsigned OpNo) {
- bool TruncateStore = N->isTruncatingStore();
+ bool TruncateStore = N->isTruncatingStore();
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
if (OpNo == 2) {
// The Mask
@@ -1890,17 +1890,17 @@ SDValue DAGTypeLegalizer::PromoteIntOp_MSCATTER(MaskedScatterSDNode *N,
NewOps[OpNo] = SExtPromotedInteger(N->getOperand(OpNo));
else
NewOps[OpNo] = ZExtPromotedInteger(N->getOperand(OpNo));
-
- N->setIndexType(TLI.getCanonicalIndexType(N->getIndexType(),
- N->getMemoryVT(), NewOps[OpNo]));
- } else {
+
+ N->setIndexType(TLI.getCanonicalIndexType(N->getIndexType(),
+ N->getMemoryVT(), NewOps[OpNo]));
+ } else {
NewOps[OpNo] = GetPromotedInteger(N->getOperand(OpNo));
- TruncateStore = true;
- }
-
- return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(),
- SDLoc(N), NewOps, N->getMemOperand(),
- N->getIndexType(), TruncateStore);
+ TruncateStore = true;
+ }
+
+ return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(),
+ SDLoc(N), NewOps, N->getMemOperand(),
+ N->getIndexType(), TruncateStore);
}
SDValue DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) {
@@ -2044,7 +2044,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::AssertZext: ExpandIntRes_AssertZext(N, Lo, Hi); break;
case ISD::BITREVERSE: ExpandIntRes_BITREVERSE(N, Lo, Hi); break;
case ISD::BSWAP: ExpandIntRes_BSWAP(N, Lo, Hi); break;
- case ISD::PARITY: ExpandIntRes_PARITY(N, Lo, Hi); break;
+ case ISD::PARITY: ExpandIntRes_PARITY(N, Lo, Hi); break;
case ISD::Constant: ExpandIntRes_Constant(N, Lo, Hi); break;
case ISD::ABS: ExpandIntRes_ABS(N, Lo, Hi); break;
case ISD::CTLZ_ZERO_UNDEF:
@@ -2057,8 +2057,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT: ExpandIntRes_FP_TO_SINT(N, Lo, Hi); break;
case ISD::STRICT_FP_TO_UINT:
case ISD::FP_TO_UINT: ExpandIntRes_FP_TO_UINT(N, Lo, Hi); break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT: ExpandIntRes_FP_TO_XINT_SAT(N, Lo, Hi); break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT: ExpandIntRes_FP_TO_XINT_SAT(N, Lo, Hi); break;
case ISD::STRICT_LLROUND:
case ISD::STRICT_LLRINT:
case ISD::LLROUND:
@@ -2135,9 +2135,9 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::ADDCARRY:
case ISD::SUBCARRY: ExpandIntRes_ADDSUBCARRY(N, Lo, Hi); break;
- case ISD::SADDO_CARRY:
- case ISD::SSUBO_CARRY: ExpandIntRes_SADDSUBO_CARRY(N, Lo, Hi); break;
-
+ case ISD::SADDO_CARRY:
+ case ISD::SSUBO_CARRY: ExpandIntRes_SADDSUBO_CARRY(N, Lo, Hi); break;
+
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: ExpandIntRes_Shift(N, Lo, Hi); break;
@@ -2154,9 +2154,9 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SSUBSAT:
case ISD::USUBSAT: ExpandIntRes_ADDSUBSAT(N, Lo, Hi); break;
- case ISD::SSHLSAT:
- case ISD::USHLSAT: ExpandIntRes_SHLSAT(N, Lo, Hi); break;
-
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT: ExpandIntRes_SHLSAT(N, Lo, Hi); break;
+
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
case ISD::UMULFIX:
@@ -2176,16 +2176,16 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VECREDUCE_SMIN:
case ISD::VECREDUCE_UMAX:
case ISD::VECREDUCE_UMIN: ExpandIntRes_VECREDUCE(N, Lo, Hi); break;
-
- case ISD::ROTL:
- case ISD::ROTR:
- ExpandIntRes_Rotate(N, Lo, Hi);
- break;
-
- case ISD::FSHL:
- case ISD::FSHR:
- ExpandIntRes_FunnelShift(N, Lo, Hi);
- break;
+
+ case ISD::ROTL:
+ case ISD::ROTR:
+ ExpandIntRes_Rotate(N, Lo, Hi);
+ break;
+
+ case ISD::FSHL:
+ case ISD::FSHR:
+ ExpandIntRes_FunnelShift(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -2197,22 +2197,22 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
unsigned Opc = Node->getOpcode();
MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
- AtomicOrdering order = cast<AtomicSDNode>(Node)->getOrdering();
- // Lower to outline atomic libcall if outline atomics enabled,
- // or to sync libcall otherwise
- RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(Opc, order, VT);
+ AtomicOrdering order = cast<AtomicSDNode>(Node)->getOrdering();
+ // Lower to outline atomic libcall if outline atomics enabled,
+ // or to sync libcall otherwise
+ RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(Opc, order, VT);
EVT RetVT = Node->getValueType(0);
TargetLowering::MakeLibCallOptions CallOptions;
- SmallVector<SDValue, 4> Ops;
- if (TLI.getLibcallName(LC)) {
- Ops.append(Node->op_begin() + 2, Node->op_end());
- Ops.push_back(Node->getOperand(1));
- } else {
- LC = RTLIB::getSYNC(Opc, VT);
- assert(LC != RTLIB::UNKNOWN_LIBCALL &&
- "Unexpected atomic op or value type!");
- Ops.append(Node->op_begin() + 1, Node->op_end());
- }
+ SmallVector<SDValue, 4> Ops;
+ if (TLI.getLibcallName(LC)) {
+ Ops.append(Node->op_begin() + 2, Node->op_end());
+ Ops.push_back(Node->getOperand(1));
+ } else {
+ LC = RTLIB::getSYNC(Opc, VT);
+ assert(LC != RTLIB::UNKNOWN_LIBCALL &&
+ "Unexpected atomic op or value type!");
+ Ops.append(Node->op_begin() + 1, Node->op_end());
+ }
return TLI.makeLibCall(DAG, LC, RetVT, Ops, CallOptions, SDLoc(Node),
Node->getOperand(0));
}
@@ -2771,26 +2771,26 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBCARRY(SDNode *N,
ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
}
-void DAGTypeLegalizer::ExpandIntRes_SADDSUBO_CARRY(SDNode *N,
- SDValue &Lo, SDValue &Hi) {
- // Expand the subcomponents.
- SDValue LHSL, LHSH, RHSL, RHSH;
- SDLoc dl(N);
- GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
- GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
- SDVTList VTList = DAG.getVTList(LHSL.getValueType(), N->getValueType(1));
-
- // We need to use an unsigned carry op for the lo part.
- unsigned CarryOp = N->getOpcode() == ISD::SADDO_CARRY ? ISD::ADDCARRY
- : ISD::SUBCARRY;
- Lo = DAG.getNode(CarryOp, dl, VTList, { LHSL, RHSL, N->getOperand(2) });
- Hi = DAG.getNode(N->getOpcode(), dl, VTList, { LHSH, RHSH, Lo.getValue(1) });
-
- // Legalized the flag result - switch anything that used the old flag to
- // use the new one.
- ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
-}
-
+void DAGTypeLegalizer::ExpandIntRes_SADDSUBO_CARRY(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ // Expand the subcomponents.
+ SDValue LHSL, LHSH, RHSL, RHSH;
+ SDLoc dl(N);
+ GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
+ GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
+ SDVTList VTList = DAG.getVTList(LHSL.getValueType(), N->getValueType(1));
+
+ // We need to use an unsigned carry op for the lo part.
+ unsigned CarryOp = N->getOpcode() == ISD::SADDO_CARRY ? ISD::ADDCARRY
+ : ISD::SUBCARRY;
+ Lo = DAG.getNode(CarryOp, dl, VTList, { LHSL, RHSL, N->getOperand(2) });
+ Hi = DAG.getNode(N->getOpcode(), dl, VTList, { LHSH, RHSH, Lo.getValue(1) });
+
+ // Legalized the flag result - switch anything that used the old flag to
+ // use the new one.
+ ReplaceValueWith(SDValue(N, 1), Hi.getValue(1));
+}
+
void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
@@ -2872,17 +2872,17 @@ void DAGTypeLegalizer::ExpandIntRes_BSWAP(SDNode *N,
Hi = DAG.getNode(ISD::BSWAP, dl, Hi.getValueType(), Hi);
}
-void DAGTypeLegalizer::ExpandIntRes_PARITY(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
- SDLoc dl(N);
- // parity(HiLo) -> parity(Lo^Hi)
- GetExpandedInteger(N->getOperand(0), Lo, Hi);
- EVT NVT = Lo.getValueType();
- Lo =
- DAG.getNode(ISD::PARITY, dl, NVT, DAG.getNode(ISD::XOR, dl, NVT, Lo, Hi));
- Hi = DAG.getConstant(0, dl, NVT);
-}
-
+void DAGTypeLegalizer::ExpandIntRes_PARITY(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDLoc dl(N);
+ // parity(HiLo) -> parity(Lo^Hi)
+ GetExpandedInteger(N->getOperand(0), Lo, Hi);
+ EVT NVT = Lo.getValueType();
+ Lo =
+ DAG.getNode(ISD::PARITY, dl, NVT, DAG.getNode(ISD::XOR, dl, NVT, Lo, Hi));
+ Hi = DAG.getConstant(0, dl, NVT);
+}
+
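// Editor's note (illustrative sketch, not part of the patch): the
// parity(HiLo) -> parity(Lo ^ Hi) rewrite above, checked on a concrete
// 64-bit value split into two 32-bit halves. XOR preserves the total number
// of set bits modulo 2, so one narrow PARITY suffices and the high result
// word is zero. Uses GCC/Clang parity builtins:
#include <cassert>
#include <cstdint>

int main() {
  uint64_t V = 0xF000000000000001ULL;       // five set bits: parity 1
  uint32_t Lo = uint32_t(V), Hi = uint32_t(V >> 32);
  assert(__builtin_parityll(V) == __builtin_parity(Lo ^ Hi));
}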
void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
@@ -2900,31 +2900,31 @@ void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_ABS(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
- SDValue N0 = N->getOperand(0);
- GetExpandedInteger(N0, Lo, Hi);
- EVT NVT = Lo.getValueType();
-
- // If we have ADDCARRY, use the expanded form of the sra+add+xor sequence we
- // use in LegalizeDAG. The ADD part of the expansion is based on
- // ExpandIntRes_ADDSUB which also uses ADDCARRY/UADDO after checking that
- // ADDCARRY is LegalOrCustom. Each of the pieces here can be further expanded
- // if needed. Shift expansion has a special case for filling with sign bits
- // so that we will only end up with one SRA.
- bool HasAddCarry = TLI.isOperationLegalOrCustom(
- ISD::ADDCARRY, TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
- if (HasAddCarry) {
- EVT ShiftAmtTy = getShiftAmountTyForConstant(NVT, TLI, DAG);
- SDValue Sign =
- DAG.getNode(ISD::SRA, dl, NVT, Hi,
- DAG.getConstant(NVT.getSizeInBits() - 1, dl, ShiftAmtTy));
- SDVTList VTList = DAG.getVTList(NVT, getSetCCResultType(NVT));
- Lo = DAG.getNode(ISD::UADDO, dl, VTList, Lo, Sign);
- Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Hi, Sign, Lo.getValue(1));
- Lo = DAG.getNode(ISD::XOR, dl, NVT, Lo, Sign);
- Hi = DAG.getNode(ISD::XOR, dl, NVT, Hi, Sign);
- return;
- }
-
+ SDValue N0 = N->getOperand(0);
+ GetExpandedInteger(N0, Lo, Hi);
+ EVT NVT = Lo.getValueType();
+
+ // If we have ADDCARRY, use the expanded form of the sra+add+xor sequence we
+ // use in LegalizeDAG. The ADD part of the expansion is based on
+ // ExpandIntRes_ADDSUB which also uses ADDCARRY/UADDO after checking that
+ // ADDCARRY is LegalOrCustom. Each of the pieces here can be further expanded
+ // if needed. Shift expansion has a special case for filling with sign bits
+ // so that we will only end up with one SRA.
+ bool HasAddCarry = TLI.isOperationLegalOrCustom(
+ ISD::ADDCARRY, TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
+ if (HasAddCarry) {
+ EVT ShiftAmtTy = getShiftAmountTyForConstant(NVT, TLI, DAG);
+ SDValue Sign =
+ DAG.getNode(ISD::SRA, dl, NVT, Hi,
+ DAG.getConstant(NVT.getSizeInBits() - 1, dl, ShiftAmtTy));
+ SDVTList VTList = DAG.getVTList(NVT, getSetCCResultType(NVT));
+ Lo = DAG.getNode(ISD::UADDO, dl, VTList, Lo, Sign);
+ Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Hi, Sign, Lo.getValue(1));
+ Lo = DAG.getNode(ISD::XOR, dl, NVT, Lo, Sign);
+ Hi = DAG.getNode(ISD::XOR, dl, NVT, Hi, Sign);
+ return;
+ }
+
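// Editor's note (illustrative sketch, not part of the patch): the
// ADDCARRY-based path above computes |x| as (x + sign) ^ sign, where sign is
// the high word shifted arithmetically to all-ones or all-zeros (the single
// SRA). The same sequence by hand on a two-word 128-bit value (abs128 is a
// hypothetical name; two's-complement conversions assumed):
#include <cassert>
#include <cstdint>

static void abs128(uint64_t &Lo, uint64_t &Hi) {
  uint64_t Sign = uint64_t(int64_t(Hi) >> 63); // 0 or ~0: the one SRA
  uint64_t NewLo = Lo + Sign;                  // UADDO on the low word
  uint64_t Carry = NewLo < Lo;                 // its overflow bit
  Hi = (Hi + Sign + Carry) ^ Sign;             // ADDCARRY, then XOR by Sign
  Lo = NewLo ^ Sign;
}

int main() {
  uint64_t Lo = ~4ULL, Hi = ~0ULL;             // the i128 value -5
  abs128(Lo, Hi);
  assert(Lo == 5 && Hi == 0);                  // |x| == 5
}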
// abs(HiLo) -> (Hi < 0 ? -HiLo : HiLo)
EVT VT = N->getValueType(0);
SDValue Neg = DAG.getNode(ISD::SUB, dl, VT,
@@ -3064,12 +3064,12 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDValue &Lo,
ReplaceValueWith(SDValue(N, 1), Tmp.second);
}
-void DAGTypeLegalizer::ExpandIntRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
- SDValue Res = TLI.expandFP_TO_INT_SAT(N, DAG);
- SplitInteger(Res, Lo, Hi);
-}
-
+void DAGTypeLegalizer::ExpandIntRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDValue Res = TLI.expandFP_TO_INT_SAT(N, DAG);
+ SplitInteger(Res, Lo, Hi);
+}
+
void DAGTypeLegalizer::ExpandIntRes_LLROUND_LLRINT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Op = N->getOperand(N->isStrictFPOpcode() ? 1 : 0);
@@ -3140,7 +3140,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));
return;
}
-
+
if (ISD::isNormalLoad(N)) {
ExpandRes_NormalLoad(N, Lo, Hi);
return;
@@ -3194,7 +3194,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize), NEVT,
N->getOriginalAlign(), MMOFlags, AAInfo);
@@ -3218,7 +3218,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
N->getOriginalAlign(), MMOFlags, AAInfo);
// Increment the pointer to the other half.
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
// Load the rest of the low bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
@@ -3358,12 +3358,12 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBSAT(SDNode *N, SDValue &Lo,
SplitInteger(Result, Lo, Hi);
}
-void DAGTypeLegalizer::ExpandIntRes_SHLSAT(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
- SDValue Result = TLI.expandShlSat(N, DAG);
- SplitInteger(Result, Lo, Hi);
-}
-
+void DAGTypeLegalizer::ExpandIntRes_SHLSAT(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDValue Result = TLI.expandShlSat(N, DAG);
+ SplitInteger(Result, Lo, Hi);
+}
+
/// This performs an expansion of the integer result for a fixed point
/// multiplication. The default expansion performs rounding down towards
/// negative infinity, though targets that do care about rounding should specify
@@ -3602,66 +3602,66 @@ void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
SDValue RHS = Node->getOperand(1);
SDLoc dl(Node);
- SDValue Ovf;
-
- unsigned CarryOp;
- switch(Node->getOpcode()) {
- default: llvm_unreachable("Node has unexpected Opcode");
- case ISD::SADDO: CarryOp = ISD::SADDO_CARRY; break;
- case ISD::SSUBO: CarryOp = ISD::SSUBO_CARRY; break;
- }
-
- bool HasCarryOp = TLI.isOperationLegalOrCustom(
- CarryOp, TLI.getTypeToExpandTo(*DAG.getContext(), LHS.getValueType()));
-
- if (HasCarryOp) {
- // Expand the subcomponents.
- SDValue LHSL, LHSH, RHSL, RHSH;
- GetExpandedInteger(LHS, LHSL, LHSH);
- GetExpandedInteger(RHS, RHSL, RHSH);
- SDVTList VTList = DAG.getVTList(LHSL.getValueType(), Node->getValueType(1));
-
- Lo = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
- ISD::UADDO : ISD::USUBO, dl, VTList, { LHSL, RHSL });
- Hi = DAG.getNode(CarryOp, dl, VTList, { LHSH, RHSH, Lo.getValue(1) });
-
- Ovf = Hi.getValue(1);
- } else {
- // Expand the result by simply replacing it with the equivalent
- // non-overflow-checking operation.
- SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
- ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
- LHS, RHS);
- SplitInteger(Sum, Lo, Hi);
-
- // Compute the overflow.
- //
- // LHSSign -> LHS >= 0
- // RHSSign -> RHS >= 0
- // SumSign -> Sum >= 0
- //
- // Add:
- // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
- // Sub:
- // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
- //
- EVT OType = Node->getValueType(1);
- SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
-
- SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
- SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
- SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
- Node->getOpcode() == ISD::SADDO ?
- ISD::SETEQ : ISD::SETNE);
-
- SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
- SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
-
- Ovf = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
- }
-
+ SDValue Ovf;
+
+ unsigned CarryOp;
+ switch(Node->getOpcode()) {
+ default: llvm_unreachable("Node has unexpected Opcode");
+ case ISD::SADDO: CarryOp = ISD::SADDO_CARRY; break;
+ case ISD::SSUBO: CarryOp = ISD::SSUBO_CARRY; break;
+ }
+
+ bool HasCarryOp = TLI.isOperationLegalOrCustom(
+ CarryOp, TLI.getTypeToExpandTo(*DAG.getContext(), LHS.getValueType()));
+
+ if (HasCarryOp) {
+ // Expand the subcomponents.
+ SDValue LHSL, LHSH, RHSL, RHSH;
+ GetExpandedInteger(LHS, LHSL, LHSH);
+ GetExpandedInteger(RHS, RHSL, RHSH);
+ SDVTList VTList = DAG.getVTList(LHSL.getValueType(), Node->getValueType(1));
+
+ Lo = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
+ ISD::UADDO : ISD::USUBO, dl, VTList, { LHSL, RHSL });
+ Hi = DAG.getNode(CarryOp, dl, VTList, { LHSH, RHSH, Lo.getValue(1) });
+
+ Ovf = Hi.getValue(1);
+ } else {
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
+ ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
+ LHS, RHS);
+ SplitInteger(Sum, Lo, Hi);
+
+ // Compute the overflow.
+ //
+ // LHSSign -> LHS >= 0
+ // RHSSign -> RHS >= 0
+ // SumSign -> Sum >= 0
+ //
+ // Add:
+ // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
+ // Sub:
+ // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
+ //
+ EVT OType = Node->getValueType(1);
+ SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());
+
+ SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
+ SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
+ SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
+ Node->getOpcode() == ISD::SADDO ?
+ ISD::SETEQ : ISD::SETNE);
+
+ SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
+ SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
+
+ Ovf = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
+ }
+
// Use the calculated overflow everywhere.
- ReplaceValueWith(SDValue(Node, 1), Ovf);
+ ReplaceValueWith(SDValue(Node, 1), Ovf);
}
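// Editor's note (illustrative sketch, not part of the patch): the sign-based
// overflow rule in the fallback path above. For addition, signed overflow
// happens exactly when both inputs share a sign and the sum's sign differs
// from it (subtraction flips the first condition). Checked with a wrapping
// 64-bit add; the unsigned-to-signed conversion is assumed to wrap (true on
// all common targets, guaranteed since C++20):
#include <cassert>
#include <cstdint>

static bool saddOverflows(int64_t L, int64_t R) {
  int64_t Sum = int64_t(uint64_t(L) + uint64_t(R)); // wrapping add
  bool LSign = L >= 0, RSign = R >= 0, SumSign = Sum >= 0;
  return (LSign == RSign) && (LSign != SumSign);    // SignsMatch && SumSignNE
}

int main() {
  assert(saddOverflows(INT64_MAX, 1));    // wraps to INT64_MIN
  assert(!saddOverflows(INT64_MAX, -1));  // mixed signs never overflow
  assert(saddOverflows(INT64_MIN, -1));   // wraps to INT64_MAX
}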
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
@@ -4117,22 +4117,22 @@ void DAGTypeLegalizer::ExpandIntRes_VECREDUCE(SDNode *N,
SplitInteger(Res, Lo, Hi);
}
-void DAGTypeLegalizer::ExpandIntRes_Rotate(SDNode *N,
- SDValue &Lo, SDValue &Hi) {
- // Lower the rotate to shifts and ORs which can be expanded.
- SDValue Res;
- TLI.expandROT(N, true /*AllowVectorOps*/, Res, DAG);
- SplitInteger(Res, Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandIntRes_FunnelShift(SDNode *N,
- SDValue &Lo, SDValue &Hi) {
- // Lower the funnel shift to shifts and ORs which can be expanded.
- SDValue Res;
- TLI.expandFunnelShift(N, Res, DAG);
- SplitInteger(Res, Lo, Hi);
-}
-
+void DAGTypeLegalizer::ExpandIntRes_Rotate(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ // Lower the rotate to shifts and ORs which can be expanded.
+ SDValue Res;
+ TLI.expandROT(N, true /*AllowVectorOps*/, Res, DAG);
+ SplitInteger(Res, Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_FunnelShift(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ // Lower the funnel shift to shifts and ORs which can be expanded.
+ SDValue Res;
+ TLI.expandFunnelShift(N, Res, DAG);
+ SplitInteger(Res, Lo, Hi);
+}
+
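// Editor's note (illustrative sketch, not part of the patch): the shift+OR
// form that expandROT produces, written out for a 64-bit ROTL. Masking the
// negated amount keeps the right-shift count in range, so Amt % 64 == 0
// needs no special case (the naive X >> (64 - Amt) would be UB at Amt == 0):
#include <cassert>
#include <cstdint>

static uint64_t rotl64(uint64_t X, unsigned Amt) {
  return (X << (Amt & 63)) | (X >> (-Amt & 63));
}

int main() {
  assert(rotl64(0x8000000000000001ULL, 1) == 0x3ULL); // top bit wraps around
  assert(rotl64(0xDEADBEEFULL, 0) == 0xDEADBEEFULL);  // zero amount: identity
}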
//===----------------------------------------------------------------------===//
// Integer Operand Expansion
//===----------------------------------------------------------------------===//
@@ -4505,7 +4505,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
NEVT, N->getOriginalAlign(), MMOFlags, AAInfo);
@@ -4540,7 +4540,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
N->getOriginalAlign(), MMOFlags, AAInfo);
// Increment the pointer to the other half.
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
// Store the lowest ExcessBits bits in the second half.
Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
@@ -4845,23 +4845,23 @@ SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
SDLoc dl(N);
-
- EVT ResVT = N->getValueType(0);
+
+ EVT ResVT = N->getValueType(0);
unsigned NumElems = N->getNumOperands();
- if (ResVT.isScalableVector()) {
- SDValue ResVec = DAG.getUNDEF(ResVT);
-
- for (unsigned OpIdx = 0; OpIdx < NumElems; ++OpIdx) {
- SDValue Op = N->getOperand(OpIdx);
- unsigned OpNumElts = Op.getValueType().getVectorMinNumElements();
- ResVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ResVec, Op,
- DAG.getIntPtrConstant(OpIdx * OpNumElts, dl));
- }
-
- return ResVec;
- }
-
+ if (ResVT.isScalableVector()) {
+ SDValue ResVec = DAG.getUNDEF(ResVT);
+
+ for (unsigned OpIdx = 0; OpIdx < NumElems; ++OpIdx) {
+ SDValue Op = N->getOperand(OpIdx);
+ unsigned OpNumElts = Op.getValueType().getVectorMinNumElements();
+ ResVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ResVec, Op,
+ DAG.getIntPtrConstant(OpIdx * OpNumElts, dl));
+ }
+
+ return ResVec;
+ }
+
EVT RetSclrTy = N->getValueType(0).getVectorElementType();
SmallVector<SDValue, 8> NewOps;
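// Editor's note (illustrative sketch, not part of the patch): the
// scalable-vector path above builds the concatenation by inserting each
// operand at offset OpIdx * OpNumElts into an undef result. The same index
// arithmetic on a flat array, for three <2 x i32> operands:
#include <cassert>
#include <cstring>

int main() {
  int Ops[3][2] = {{1, 2}, {3, 4}, {5, 6}};
  int Res[6];
  for (int OpIdx = 0; OpIdx < 3; ++OpIdx)    // one INSERT_SUBVECTOR per op
    std::memcpy(Res + OpIdx * 2, Ops[OpIdx], sizeof(Ops[OpIdx]));
  assert(Res[0] == 1 && Res[5] == 6);
}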
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index a59f038547..dda87ad719 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -663,7 +663,7 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
// Process the list of nodes that need to be reanalyzed.
while (!NodesToAnalyze.empty()) {
- SDNode *N = NodesToAnalyze.pop_back_val();
+ SDNode *N = NodesToAnalyze.pop_back_val();
if (N->getNodeId() != DAGTypeLegalizer::NewNode)
// The node was analyzed while reanalyzing an earlier node - it is safe
// to skip. Note that this is not a morphing node - otherwise it would
@@ -752,10 +752,10 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
// Note that in some cases vector operation operands may be greater than
// the vector element type. For example BUILD_VECTOR of type <1 x i1> with
// a constant i8 operand.
-
- // We don't currently support the scalarization of scalable vector types.
- assert(Result.getValueSizeInBits().getFixedSize() >=
- Op.getScalarValueSizeInBits() &&
+
+ // We don't currently support the scalarization of scalable vector types.
+ assert(Result.getValueSizeInBits().getFixedSize() >=
+ Op.getScalarValueSizeInBits() &&
"Invalid type for scalarized vector");
AnalyzeNewValue(Result);
@@ -957,11 +957,11 @@ bool DAGTypeLegalizer::CustomWidenLowerNode(SDNode *N, EVT VT) {
assert(Results.size() == N->getNumValues() &&
"Custom lowering returned the wrong number of results!");
for (unsigned i = 0, e = Results.size(); i != e; ++i) {
- // If this is a chain output or already widened just replace it.
- bool WasWidened = SDValue(N, i).getValueType() != Results[i].getValueType();
- if (WasWidened)
- SetWidenedVector(SDValue(N, i), Results[i]);
- else
+ // If this is a chain output or already widened just replace it.
+ bool WasWidened = SDValue(N, i).getValueType() != Results[i].getValueType();
+ if (WasWidened)
+ SetWidenedVector(SDValue(N, i), Results[i]);
+ else
ReplaceValueWith(SDValue(N, i), Results[i]);
}
return true;
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 630a0a9ada..53e15d5de1 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -311,11 +311,11 @@ private:
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
SDValue PromoteIntRes_Constant(SDNode *N);
SDValue PromoteIntRes_CTLZ(SDNode *N);
- SDValue PromoteIntRes_CTPOP_PARITY(SDNode *N);
+ SDValue PromoteIntRes_CTPOP_PARITY(SDNode *N);
SDValue PromoteIntRes_CTTZ(SDNode *N);
SDValue PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntRes_FP_TO_XINT(SDNode *N);
- SDValue PromoteIntRes_FP_TO_XINT_SAT(SDNode *N);
+ SDValue PromoteIntRes_FP_TO_XINT_SAT(SDNode *N);
SDValue PromoteIntRes_FP_TO_FP16(SDNode *N);
SDValue PromoteIntRes_FREEZE(SDNode *N);
SDValue PromoteIntRes_INT_EXTEND(SDNode *N);
@@ -332,26 +332,26 @@ private:
SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N);
SDValue PromoteIntRes_ZExtIntBinOp(SDNode *N);
SDValue PromoteIntRes_SExtIntBinOp(SDNode *N);
- SDValue PromoteIntRes_UMINUMAX(SDNode *N);
+ SDValue PromoteIntRes_UMINUMAX(SDNode *N);
SDValue PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N);
SDValue PromoteIntRes_SRA(SDNode *N);
SDValue PromoteIntRes_SRL(SDNode *N);
SDValue PromoteIntRes_TRUNCATE(SDNode *N);
SDValue PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo);
- SDValue PromoteIntRes_SADDSUBO_CARRY(SDNode *N, unsigned ResNo);
+ SDValue PromoteIntRes_SADDSUBO_CARRY(SDNode *N, unsigned ResNo);
SDValue PromoteIntRes_UNDEF(SDNode *N);
SDValue PromoteIntRes_VAARG(SDNode *N);
SDValue PromoteIntRes_VSCALE(SDNode *N);
SDValue PromoteIntRes_XMULO(SDNode *N, unsigned ResNo);
- SDValue PromoteIntRes_ADDSUBSHLSAT(SDNode *N);
+ SDValue PromoteIntRes_ADDSUBSHLSAT(SDNode *N);
SDValue PromoteIntRes_MULFIX(SDNode *N);
SDValue PromoteIntRes_DIVFIX(SDNode *N);
SDValue PromoteIntRes_FLT_ROUNDS(SDNode *N);
SDValue PromoteIntRes_VECREDUCE(SDNode *N);
SDValue PromoteIntRes_ABS(SDNode *N);
- SDValue PromoteIntRes_Rotate(SDNode *N);
- SDValue PromoteIntRes_FunnelShift(SDNode *N);
+ SDValue PromoteIntRes_Rotate(SDNode *N);
+ SDValue PromoteIntRes_FunnelShift(SDNode *N);
// Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -425,7 +425,7 @@ private:
void ExpandIntRes_FLT_ROUNDS (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_SINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_UINT (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_FP_TO_XINT_SAT (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_FP_TO_XINT_SAT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_LLROUND_LLRINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Logical (SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -433,10 +433,10 @@ private:
void ExpandIntRes_ADDSUBC (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBE (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBCARRY (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_SADDSUBO_CARRY (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_SADDSUBO_CARRY (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_BITREVERSE (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_PARITY (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_PARITY (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -450,16 +450,16 @@ private:
void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_XMULO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBSAT (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_SHLSAT (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_SHLSAT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MULFIX (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_DIVFIX (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ATOMIC_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_VECREDUCE (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_Rotate (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_FunnelShift (SDNode *N, SDValue &Lo, SDValue &Hi);
-
+ void ExpandIntRes_Rotate (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_FunnelShift (SDNode *N, SDValue &Lo, SDValue &Hi);
+
void ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -553,8 +553,8 @@ private:
SDValue SoftenFloatRes_UNDEF(SDNode *N);
SDValue SoftenFloatRes_VAARG(SDNode *N);
SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
- SDValue SoftenFloatRes_VECREDUCE(SDNode *N);
- SDValue SoftenFloatRes_VECREDUCE_SEQ(SDNode *N);
+ SDValue SoftenFloatRes_VECREDUCE(SDNode *N);
+ SDValue SoftenFloatRes_VECREDUCE_SEQ(SDNode *N);
// Convert Float Operand to Integer.
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
@@ -563,7 +563,7 @@ private:
SDValue SoftenFloatOp_BR_CC(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_XINT(SDNode *N);
- SDValue SoftenFloatOp_FP_TO_XINT_SAT(SDNode *N);
+ SDValue SoftenFloatOp_FP_TO_XINT_SAT(SDNode *N);
SDValue SoftenFloatOp_LROUND(SDNode *N);
SDValue SoftenFloatOp_LLROUND(SDNode *N);
SDValue SoftenFloatOp_LRINT(SDNode *N);
@@ -632,7 +632,7 @@ private:
SDValue ExpandFloatOp_BR_CC(SDNode *N);
SDValue ExpandFloatOp_FCOPYSIGN(SDNode *N);
SDValue ExpandFloatOp_FP_ROUND(SDNode *N);
- SDValue ExpandFloatOp_FP_TO_XINT(SDNode *N);
+ SDValue ExpandFloatOp_FP_TO_XINT(SDNode *N);
SDValue ExpandFloatOp_LROUND(SDNode *N);
SDValue ExpandFloatOp_LLROUND(SDNode *N);
SDValue ExpandFloatOp_LRINT(SDNode *N);
@@ -642,8 +642,8 @@ private:
SDValue ExpandFloatOp_STORE(SDNode *N, unsigned OpNo);
void FloatExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
- ISD::CondCode &CCCode, const SDLoc &dl,
- SDValue &Chain, bool IsSignaling = false);
+ ISD::CondCode &CCCode, const SDLoc &dl,
+ SDValue &Chain, bool IsSignaling = false);
//===--------------------------------------------------------------------===//
// Float promotion support: LegalizeFloatTypes.cpp
@@ -673,15 +673,15 @@ private:
SDValue PromoteFloatRes_UNDEF(SDNode *N);
SDValue BitcastToInt_ATOMIC_SWAP(SDNode *N);
SDValue PromoteFloatRes_XINT_TO_FP(SDNode *N);
- SDValue PromoteFloatRes_VECREDUCE(SDNode *N);
- SDValue PromoteFloatRes_VECREDUCE_SEQ(SDNode *N);
+ SDValue PromoteFloatRes_VECREDUCE(SDNode *N);
+ SDValue PromoteFloatRes_VECREDUCE_SEQ(SDNode *N);
bool PromoteFloatOperand(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo);
- SDValue PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N, unsigned OpNo);
+ SDValue PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_STORE(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo);
@@ -713,15 +713,15 @@ private:
SDValue SoftPromoteHalfRes_UnaryOp(SDNode *N);
SDValue SoftPromoteHalfRes_XINT_TO_FP(SDNode *N);
SDValue SoftPromoteHalfRes_UNDEF(SDNode *N);
- SDValue SoftPromoteHalfRes_VECREDUCE(SDNode *N);
- SDValue SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N);
+ SDValue SoftPromoteHalfRes_VECREDUCE(SDNode *N);
+ SDValue SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N);
bool SoftPromoteHalfOperand(SDNode *N, unsigned OpNo);
SDValue SoftPromoteHalfOp_BITCAST(SDNode *N);
SDValue SoftPromoteHalfOp_FCOPYSIGN(SDNode *N, unsigned OpNo);
SDValue SoftPromoteHalfOp_FP_EXTEND(SDNode *N);
SDValue SoftPromoteHalfOp_FP_TO_XINT(SDNode *N);
- SDValue SoftPromoteHalfOp_FP_TO_XINT_SAT(SDNode *N);
+ SDValue SoftPromoteHalfOp_FP_TO_XINT_SAT(SDNode *N);
SDValue SoftPromoteHalfOp_SETCC(SDNode *N);
SDValue SoftPromoteHalfOp_SELECT_CC(SDNode *N, unsigned OpNo);
SDValue SoftPromoteHalfOp_STORE(SDNode *N, unsigned OpNo);
@@ -766,7 +766,7 @@ private:
SDValue ScalarizeVecRes_SETCC(SDNode *N);
SDValue ScalarizeVecRes_UNDEF(SDNode *N);
SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
- SDValue ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N);
+ SDValue ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N);
SDValue ScalarizeVecRes_FIX(SDNode *N);
@@ -782,10 +782,10 @@ private:
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N, unsigned OpNo);
- SDValue ScalarizeVecOp_FP_EXTEND(SDNode *N);
- SDValue ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N);
+ SDValue ScalarizeVecOp_FP_EXTEND(SDNode *N);
+ SDValue ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N);
SDValue ScalarizeVecOp_VECREDUCE(SDNode *N);
- SDValue ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N);
+ SDValue ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Splitting Support: LegalizeVectorTypes.cpp
@@ -803,8 +803,8 @@ private:
// Helper function for incrementing the pointer when splitting
// memory operations
- void IncrementPointer(MemSDNode *N, EVT MemVT, MachinePointerInfo &MPI,
- SDValue &Ptr, uint64_t *ScaledOffset = nullptr);
+ void IncrementPointer(MemSDNode *N, EVT MemVT, MachinePointerInfo &MPI,
+ SDValue &Ptr, uint64_t *ScaledOffset = nullptr);
// Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>.
void SplitVectorResult(SDNode *N, unsigned ResNo);
@@ -831,23 +831,23 @@ private:
void SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MLOAD(MaskedLoadSDNode *MLD, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MGATHER(MaskedGatherSDNode *MGT, SDValue &Lo, SDValue &Hi);
- void SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, SDValue &Lo,
SDValue &Hi);
void SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi);
- void SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo, SDValue &Hi);
// Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo);
- SDValue SplitVecOp_VECREDUCE_SEQ(SDNode *N);
+ SDValue SplitVecOp_VECREDUCE_SEQ(SDNode *N);
SDValue SplitVecOp_UnaryOp(SDNode *N);
SDValue SplitVecOp_TruncateHelper(SDNode *N);
SDValue SplitVecOp_BITCAST(SDNode *N);
- SDValue SplitVecOp_INSERT_SUBVECTOR(SDNode *N, unsigned OpNo);
+ SDValue SplitVecOp_INSERT_SUBVECTOR(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_ExtVecInRegOp(SDNode *N);
@@ -859,7 +859,7 @@ private:
SDValue SplitVecOp_VSETCC(SDNode *N);
SDValue SplitVecOp_FP_ROUND(SDNode *N);
SDValue SplitVecOp_FCOPYSIGN(SDNode *N);
- SDValue SplitVecOp_FP_TO_XINT_SAT(SDNode *N);
+ SDValue SplitVecOp_FP_TO_XINT_SAT(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Widening Support: LegalizeVectorTypes.cpp
@@ -891,9 +891,9 @@ private:
SDValue WidenVecRes_LOAD(SDNode* N);
SDValue WidenVecRes_MLOAD(MaskedLoadSDNode* N);
SDValue WidenVecRes_MGATHER(MaskedGatherSDNode* N);
- SDValue WidenVecRes_ScalarOp(SDNode* N);
+ SDValue WidenVecRes_ScalarOp(SDNode* N);
SDValue WidenVecRes_SELECT(SDNode* N);
- SDValue WidenVSELECTMask(SDNode *N);
+ SDValue WidenVSELECTMask(SDNode *N);
SDValue WidenVecRes_SELECT_CC(SDNode* N);
SDValue WidenVecRes_SETCC(SDNode* N);
SDValue WidenVecRes_STRICT_FSETCC(SDNode* N);
@@ -908,7 +908,7 @@ private:
SDValue WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo);
SDValue WidenVecRes_Convert(SDNode *N);
SDValue WidenVecRes_Convert_StrictFP(SDNode *N);
- SDValue WidenVecRes_FP_TO_XINT_SAT(SDNode *N);
+ SDValue WidenVecRes_FP_TO_XINT_SAT(SDNode *N);
SDValue WidenVecRes_FCOPYSIGN(SDNode *N);
SDValue WidenVecRes_POWI(SDNode *N);
SDValue WidenVecRes_Unary(SDNode *N);
@@ -930,10 +930,10 @@ private:
SDValue WidenVecOp_VSELECT(SDNode *N);
SDValue WidenVecOp_Convert(SDNode *N);
- SDValue WidenVecOp_FP_TO_XINT_SAT(SDNode *N);
+ SDValue WidenVecOp_FP_TO_XINT_SAT(SDNode *N);
SDValue WidenVecOp_FCOPYSIGN(SDNode *N);
SDValue WidenVecOp_VECREDUCE(SDNode *N);
- SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);
+ SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);
/// Helper function to generate a set of operations to perform
/// a vector operation for a wider type.
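
The declarations above thread two new node families through every legalization strategy in this header: the saturating conversions (ISD::FP_TO_SINT_SAT / FP_TO_UINT_SAT, handled by the *_FP_TO_XINT_SAT methods) and the ordered reductions (ISD::VECREDUCE_SEQ_FADD / FMUL, handled by the *_VECREDUCE_SEQ methods). As a minimal sketch of what the saturating nodes compute, here is a scalar double-to-i32 version in standalone C++ (an illustration of the node's semantics, not the LLVM implementation):

#include <cmath>
#include <cstdint>
#include <limits>

// Saturating fp-to-signed-int: out-of-range inputs clamp to the integer
// type's bounds instead of being undefined, and NaN converts to zero.
int32_t fptosi_sat_i32(double X) {
  if (std::isnan(X))
    return 0;
  if (X <= static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  if (X >= static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(X); // in range: ordinary truncation
}

Each legalizer then only has to reconcile the vector type with what the target supports; the per-element semantics stay as above.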
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 81cc2bf10d..c0a391edef 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -175,8 +175,8 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Increment the pointer to the other half.
unsigned IncrementSize = NOutVT.getSizeInBits() / 8;
- StackPtr =
- DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(IncrementSize), dl);
+ StackPtr =
+ DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(IncrementSize), dl);
// Load the second half from the stack slot.
Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr,
@@ -267,7 +267,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits() / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
Hi = DAG.getLoad(
NVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(IncrementSize),
LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), AAInfo);
@@ -482,7 +482,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
St->getOriginalAlign(), St->getMemOperand()->getFlags(),
AAInfo);
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
Hi = DAG.getStore(
Chain, dl, Hi, Ptr, St->getPointerInfo().getWithOffset(IncrementSize),
St->getOriginalAlign(), St->getMemOperand()->getFlags(), AAInfo);
@@ -515,8 +515,8 @@ void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Cond = N->getOperand(0);
CL = CH = Cond;
if (Cond.getValueType().isVector()) {
- if (SDValue Res = WidenVSELECTMask(N))
- std::tie(CL, CH) = DAG.SplitVector(Res, dl);
+ if (SDValue Res = WidenVSELECTMask(N))
+ std::tie(CL, CH) = DAG.SplitVector(Res, dl);
// Check if there are already split versions of the vector available and
// use those instead of splitting the mask operand again.
else if (getTypeAction(Cond.getValueType()) ==
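
The two hunks above keep the expand-through-memory pattern intact: a value whose type must be expanded is spilled to a stack slot, and the two halves are re-loaded at byte offsets 0 and IncrementSize = SizeInBits/8. A rough standalone C++ model of that pattern (little-endian half assignment and the GCC/Clang __uint128_t extension assumed; an illustration, not the DAG code):

#include <cstdint>
#include <cstring>

struct Halves { uint64_t Lo, Hi; };

Halves expandThroughStackSlot(__uint128_t Wide) {
  unsigned char Slot[sizeof(Wide)];
  std::memcpy(Slot, &Wide, sizeof(Wide)); // store the whole value
  Halves H;
  std::memcpy(&H.Lo, Slot, sizeof(H.Lo));                // load first half
  std::memcpy(&H.Hi, Slot + sizeof(H.Lo), sizeof(H.Hi)); // Ptr + IncrementSize
  return H;
}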
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 4015a5a0ce..5e193511e4 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -453,10 +453,10 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::UADDSAT:
case ISD::SSUBSAT:
case ISD::USUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -490,11 +490,11 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(0).getValueType());
break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- Action = TLI.getOperationAction(Node->getOpcode(),
- Node->getOperand(1).getValueType());
- break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ Action = TLI.getOperationAction(Node->getOpcode(),
+ Node->getOperand(1).getValueType());
+ break;
}
LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));
@@ -802,7 +802,7 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
break;
case ISD::ROTL:
case ISD::ROTR:
- if (TLI.expandROT(Node, false /*AllowVectorOps*/, Tmp, DAG)) {
+ if (TLI.expandROT(Node, false /*AllowVectorOps*/, Tmp, DAG)) {
Results.push_back(Tmp);
return;
}
@@ -814,15 +814,15 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
return;
}
break;
- case ISD::SMIN:
- case ISD::SMAX:
- case ISD::UMIN:
- case ISD::UMAX:
- if (SDValue Expanded = TLI.expandIntMINMAX(Node, DAG)) {
- Results.push_back(Expanded);
- return;
- }
- break;
+ case ISD::SMIN:
+ case ISD::SMAX:
+ case ISD::UMIN:
+ case ISD::UMAX:
+ if (SDValue Expanded = TLI.expandIntMINMAX(Node, DAG)) {
+ Results.push_back(Expanded);
+ return;
+ }
+ break;
case ISD::UADDO:
case ISD::USUBO:
ExpandUADDSUBO(Node, Results);
@@ -885,10 +885,10 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
case ISD::VECREDUCE_FMIN:
Results.push_back(TLI.expandVecReduce(Node, DAG));
return;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
- return;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
+ return;
case ISD::SREM:
case ISD::UREM:
ExpandREM(Node, Results);
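
VECREDUCE_SEQ_FADD/FMUL differ from the unordered reductions in that operand 0 is a scalar start value and operand 1 is the vector to reduce, which is why the legality query above switches to getOperand(1)'s type, and in that expansion must preserve strict left-to-right evaluation. A minimal standalone C++ model of the expanded form (illustrative, not TLI.expandVecReduceSeq itself):

#include <cstddef>

// Strictly ordered reduction: (((Acc + V[0]) + V[1]) + ...) + V[N-1].
// No reassociation is permitted, unlike VECREDUCE_FADD, which a target
// may instead expand as a log2-depth shuffle-and-add tree.
float vecreduce_seq_fadd(float Acc, const float *V, size_t N) {
  for (size_t I = 0; I != N; ++I)
    Acc = Acc + V[I];
  return Acc;
}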
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 57cb364f19..5e878e0973 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -129,8 +129,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::UADDSAT:
case ISD::SSUBSAT:
case ISD::USUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
case ISD::FPOW:
case ISD::FREM:
@@ -146,13 +146,13 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
- case ISD::ROTL:
- case ISD::ROTR:
+ case ISD::ROTL:
+ case ISD::ROTR:
R = ScalarizeVecRes_BinOp(N);
break;
case ISD::FMA:
- case ISD::FSHL:
- case ISD::FSHR:
+ case ISD::FSHL:
+ case ISD::FSHR:
R = ScalarizeVecRes_TernaryOp(N);
break;
@@ -162,11 +162,11 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
R = ScalarizeVecRes_StrictFPOp(N);
break;
- case ISD::FP_TO_UINT_SAT:
- case ISD::FP_TO_SINT_SAT:
- R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
- break;
-
+ case ISD::FP_TO_UINT_SAT:
+ case ISD::FP_TO_SINT_SAT:
+ R = ScalarizeVecRes_FP_TO_XINT_SAT(N);
+ break;
+
case ISD::UADDO:
case ISD::SADDO:
case ISD::USUBO:
@@ -521,23 +521,23 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
return GetScalarizedVector(N->getOperand(Op));
}
-SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
- SDValue Src = N->getOperand(0);
- EVT SrcVT = Src.getValueType();
- SDLoc dl(N);
-
- // Handle case where result is scalarized but operand is not
- if (getTypeAction(SrcVT) == TargetLowering::TypeScalarizeVector)
- Src = GetScalarizedVector(Src);
- else
- Src = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, dl, SrcVT.getVectorElementType(), Src,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
-
- EVT DstVT = N->getValueType(0).getVectorElementType();
- return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));
-}
-
+SDValue DAGTypeLegalizer::ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N) {
+ SDValue Src = N->getOperand(0);
+ EVT SrcVT = Src.getValueType();
+ SDLoc dl(N);
+
+ // Handle case where result is scalarized but operand is not
+ if (getTypeAction(SrcVT) == TargetLowering::TypeScalarizeVector)
+ Src = GetScalarizedVector(Src);
+ else
+ Src = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, SrcVT.getVectorElementType(), Src,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+
+ EVT DstVT = N->getValueType(0).getVectorElementType();
+ return DAG.getNode(N->getOpcode(), dl, DstVT, Src, N->getOperand(1));
+}
+
SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
assert(N->getValueType(0).isVector() &&
N->getOperand(0).getValueType().isVector() &&
@@ -580,80 +580,80 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
dbgs() << "\n");
SDValue Res = SDValue();
- switch (N->getOpcode()) {
- default:
+ switch (N->getOpcode()) {
+ default:
#ifndef NDEBUG
- dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n";
+ dbgs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
+ N->dump(&DAG);
+ dbgs() << "\n";
#endif
- report_fatal_error("Do not know how to scalarize this operator's "
- "operand!\n");
- case ISD::BITCAST:
- Res = ScalarizeVecOp_BITCAST(N);
- break;
- case ISD::ANY_EXTEND:
- case ISD::ZERO_EXTEND:
- case ISD::SIGN_EXTEND:
- case ISD::TRUNCATE:
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- Res = ScalarizeVecOp_UnaryOp(N);
- break;
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT:
- Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
- break;
- case ISD::CONCAT_VECTORS:
- Res = ScalarizeVecOp_CONCAT_VECTORS(N);
- break;
- case ISD::EXTRACT_VECTOR_ELT:
- Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
- break;
- case ISD::VSELECT:
- Res = ScalarizeVecOp_VSELECT(N);
- break;
- case ISD::SETCC:
- Res = ScalarizeVecOp_VSETCC(N);
- break;
- case ISD::STORE:
- Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
- break;
- case ISD::STRICT_FP_ROUND:
- Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
- break;
- case ISD::FP_ROUND:
- Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
- break;
- case ISD::STRICT_FP_EXTEND:
- Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
- break;
- case ISD::FP_EXTEND:
- Res = ScalarizeVecOp_FP_EXTEND(N);
- break;
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_ADD:
- case ISD::VECREDUCE_MUL:
- case ISD::VECREDUCE_AND:
- case ISD::VECREDUCE_OR:
- case ISD::VECREDUCE_XOR:
- case ISD::VECREDUCE_SMAX:
- case ISD::VECREDUCE_SMIN:
- case ISD::VECREDUCE_UMAX:
- case ISD::VECREDUCE_UMIN:
- case ISD::VECREDUCE_FMAX:
- case ISD::VECREDUCE_FMIN:
- Res = ScalarizeVecOp_VECREDUCE(N);
- break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
- break;
+ report_fatal_error("Do not know how to scalarize this operator's "
+ "operand!\n");
+ case ISD::BITCAST:
+ Res = ScalarizeVecOp_BITCAST(N);
+ break;
+ case ISD::ANY_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::SIGN_EXTEND:
+ case ISD::TRUNCATE:
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ Res = ScalarizeVecOp_UnaryOp(N);
+ break;
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT:
+ Res = ScalarizeVecOp_UnaryOp_StrictFP(N);
+ break;
+ case ISD::CONCAT_VECTORS:
+ Res = ScalarizeVecOp_CONCAT_VECTORS(N);
+ break;
+ case ISD::EXTRACT_VECTOR_ELT:
+ Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
+ break;
+ case ISD::VSELECT:
+ Res = ScalarizeVecOp_VSELECT(N);
+ break;
+ case ISD::SETCC:
+ Res = ScalarizeVecOp_VSETCC(N);
+ break;
+ case ISD::STORE:
+ Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
+ break;
+ case ISD::STRICT_FP_ROUND:
+ Res = ScalarizeVecOp_STRICT_FP_ROUND(N, OpNo);
+ break;
+ case ISD::FP_ROUND:
+ Res = ScalarizeVecOp_FP_ROUND(N, OpNo);
+ break;
+ case ISD::STRICT_FP_EXTEND:
+ Res = ScalarizeVecOp_STRICT_FP_EXTEND(N);
+ break;
+ case ISD::FP_EXTEND:
+ Res = ScalarizeVecOp_FP_EXTEND(N);
+ break;
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_MUL:
+ case ISD::VECREDUCE_AND:
+ case ISD::VECREDUCE_OR:
+ case ISD::VECREDUCE_XOR:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_UMIN:
+ case ISD::VECREDUCE_FMAX:
+ case ISD::VECREDUCE_FMIN:
+ Res = ScalarizeVecOp_VECREDUCE(N);
+ break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
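
For most of the operand-scalarization cases dispatched above, the vector being legalized is <1 x ty>, so the handler extracts the lone element, emits the scalar form of the node, and, where a vector result is required, rebuilds a one-element vector. A toy standalone C++ model of that round trip for ISD::FTRUNC (Vec1 is a stand-in for a <1 x float> value, not an LLVM type):

#include <cmath>

struct Vec1 { float Elt; }; // stand-in for <1 x float>

Vec1 scalarizeFTrunc(Vec1 V) {
  float Scalar = V.Elt;           // GetScalarizedVector
  float Res = std::trunc(Scalar); // scalar FTRUNC
  return Vec1{Res};               // SCALAR_TO_VECTOR
}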
@@ -798,7 +798,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
/// If the value to round is a vector that needs to be scalarized, it must be
/// <1 x ty>. Convert the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
- assert(OpNo == 0 && "Wrong operand for scalarization!");
+ assert(OpNo == 0 && "Wrong operand for scalarization!");
SDValue Elt = GetScalarizedVector(N->getOperand(0));
SDValue Res = DAG.getNode(ISD::FP_ROUND, SDLoc(N),
N->getValueType(0).getVectorElementType(), Elt,
@@ -824,37 +824,37 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N,
// handled all replacements since caller can only handle a single result.
ReplaceValueWith(SDValue(N, 0), Res);
return SDValue();
-}
-
-/// If the value to extend is a vector that needs to be scalarized, it must be
-/// <1 x ty>. Convert the element instead.
-SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_EXTEND(SDNode *N) {
- SDValue Elt = GetScalarizedVector(N->getOperand(0));
- SDValue Res = DAG.getNode(ISD::FP_EXTEND, SDLoc(N),
- N->getValueType(0).getVectorElementType(), Elt);
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
-}
-
-/// If the value to extend is a vector that needs to be scalarized, it must be
-/// <1 x ty>. Convert the element instead.
-SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
- SDValue Elt = GetScalarizedVector(N->getOperand(1));
- SDValue Res =
- DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(N),
- {N->getValueType(0).getVectorElementType(), MVT::Other},
- {N->getOperand(0), Elt});
- // Legalize the chain result - switch anything that used the old chain to
- // use the new one.
- ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
-
- Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
-
- // Do our own replacement and return SDValue() to tell the caller that we
- // handled all replacements since caller can only handle a single result.
- ReplaceValueWith(SDValue(N, 0), Res);
- return SDValue();
-}
-
+}
+
+/// If the value to extend is a vector that needs to be scalarized, it must be
+/// <1 x ty>. Convert the element instead.
+SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_EXTEND(SDNode *N) {
+ SDValue Elt = GetScalarizedVector(N->getOperand(0));
+ SDValue Res = DAG.getNode(ISD::FP_EXTEND, SDLoc(N),
+ N->getValueType(0).getVectorElementType(), Elt);
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
+}
+
+/// If the value to extend is a vector that needs to be scalarized, it must be
+/// <1 x ty>. Convert the element instead.
+SDValue DAGTypeLegalizer::ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N) {
+ SDValue Elt = GetScalarizedVector(N->getOperand(1));
+ SDValue Res =
+ DAG.getNode(ISD::STRICT_FP_EXTEND, SDLoc(N),
+ {N->getValueType(0).getVectorElementType(), MVT::Other},
+ {N->getOperand(0), Elt});
+ // Legalize the chain result - switch anything that used the old chain to
+ // use the new one.
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+
+ Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), N->getValueType(0), Res);
+
+ // Do our own replacement and return SDValue() to tell the caller that we
+ // handled all replacements since caller can only handle a single result.
+ ReplaceValueWith(SDValue(N, 0), Res);
+ return SDValue();
+}
+
SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE(SDNode *N) {
SDValue Res = GetScalarizedVector(N->getOperand(0));
// Result type may be wider than element type.
@@ -863,17 +863,17 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE(SDNode *N) {
return Res;
}
-SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
- SDValue AccOp = N->getOperand(0);
- SDValue VecOp = N->getOperand(1);
-
- unsigned BaseOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
-
- SDValue Op = GetScalarizedVector(VecOp);
- return DAG.getNode(BaseOpc, SDLoc(N), N->getValueType(0),
- AccOp, Op, N->getFlags());
-}
-
+SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
+ SDValue AccOp = N->getOperand(0);
+ SDValue VecOp = N->getOperand(1);
+
+ unsigned BaseOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
+
+ SDValue Op = GetScalarizedVector(VecOp);
+ return DAG.getNode(BaseOpc, SDLoc(N), N->getValueType(0),
+ AccOp, Op, N->getFlags());
+}
+
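
ScalarizeVecOp_VECREDUCE_SEQ above shows the degenerate case of an ordered reduction: with a <1 x ty> vector operand, getVecReduceBaseOpcode maps the reduction back to its binary opcode, so the whole node collapses to one scalar operation on (Acc, Elt). Sketched in standalone C++ for the FADD flavor:

// VECREDUCE_SEQ_FADD over a single-element vector is just Acc + Elt.
float scalarizedVecReduceSeqFAdd(float Acc, float Elt) {
  return Acc + Elt;
}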
//===----------------------------------------------------------------------===//
// Result Vector Splitting
//===----------------------------------------------------------------------===//
@@ -913,10 +913,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
case ISD::FCOPYSIGN: SplitVecRes_FCOPYSIGN(N, Lo, Hi); break;
case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
- case ISD::SPLAT_VECTOR:
- case ISD::SCALAR_TO_VECTOR:
- SplitVecRes_ScalarOp(N, Lo, Hi);
- break;
+ case ISD::SPLAT_VECTOR:
+ case ISD::SCALAR_TO_VECTOR:
+ SplitVecRes_ScalarOp(N, Lo, Hi);
+ break;
case ISD::SIGN_EXTEND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
case ISD::LOAD:
SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
@@ -1019,15 +1019,15 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::UADDSAT:
case ISD::SSUBSAT:
case ISD::USUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
- case ISD::ROTL:
- case ISD::ROTR:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
+ case ISD::ROTL:
+ case ISD::ROTR:
SplitVecRes_BinOp(N, Lo, Hi);
break;
case ISD::FMA:
- case ISD::FSHL:
- case ISD::FSHR:
+ case ISD::FSHL:
+ case ISD::FSHR:
SplitVecRes_TernaryOp(N, Lo, Hi);
break;
@@ -1037,11 +1037,11 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
SplitVecRes_StrictFPOp(N, Lo, Hi);
break;
- case ISD::FP_TO_UINT_SAT:
- case ISD::FP_TO_SINT_SAT:
- SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
- break;
-
+ case ISD::FP_TO_UINT_SAT:
+ case ISD::FP_TO_SINT_SAT:
+ SplitVecRes_FP_TO_XINT_SAT(N, Lo, Hi);
+ break;
+
case ISD::UADDO:
case ISD::SADDO:
case ISD::USUBO:
@@ -1068,26 +1068,26 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
}
void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
- MachinePointerInfo &MPI, SDValue &Ptr,
- uint64_t *ScaledOffset) {
+ MachinePointerInfo &MPI, SDValue &Ptr,
+ uint64_t *ScaledOffset) {
SDLoc DL(N);
unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinSize() / 8;
if (MemVT.isScalableVector()) {
- SDNodeFlags Flags;
+ SDNodeFlags Flags;
SDValue BytesIncrement = DAG.getVScale(
DL, Ptr.getValueType(),
APInt(Ptr.getValueSizeInBits().getFixedSize(), IncrementSize));
MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
- Flags.setNoUnsignedWrap(true);
- if (ScaledOffset)
- *ScaledOffset += IncrementSize;
- Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement,
- Flags);
+ Flags.setNoUnsignedWrap(true);
+ if (ScaledOffset)
+ *ScaledOffset += IncrementSize;
+ Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, BytesIncrement,
+ Flags);
} else {
MPI = N->getPointerInfo().getWithOffset(IncrementSize);
// Increment the pointer to the other half.
- Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::Fixed(IncrementSize));
}
}
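
IncrementPointer above advances the address past the low half of a split memory access. For fixed-width vectors the step is a known byte count; for scalable vectors the known-minimum byte count must be multiplied by the runtime vscale, and the resulting add carries the no-unsigned-wrap flag. A standalone C++ model (Vscale stands in for the runtime value; illustrative only):

#include <cstdint>

unsigned char *incrementPointer(unsigned char *Ptr, uint64_t MinBytes,
                                bool Scalable, uint64_t Vscale) {
  // Fixed: constant byte offset. Scalable: vscale * known-minimum size.
  uint64_t Step = Scalable ? MinBytes * Vscale : MinBytes;
  return Ptr + Step;
}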
@@ -1296,8 +1296,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// Increment the pointer to the other part.
unsigned IncrementSize = Lo.getValueSizeInBits() / 8;
- StackPtr =
- DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(IncrementSize), dl);
+ StackPtr =
+ DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(IncrementSize), dl);
// Load the Hi part from the stack slot.
Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr,
@@ -1545,16 +1545,16 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
unsigned IdxVal = CIdx->getZExtValue();
- unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
- if (IdxVal < LoNumElts) {
+ unsigned LoNumElts = Lo.getValueType().getVectorMinNumElements();
+ if (IdxVal < LoNumElts) {
Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
Lo.getValueType(), Lo, Elt, Idx);
- return;
- } else if (!Vec.getValueType().isScalableVector()) {
+ return;
+ } else if (!Vec.getValueType().isScalableVector()) {
Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt,
DAG.getVectorIdxConstant(IdxVal - LoNumElts, dl));
- return;
- }
+ return;
+ }
}
// See if the target wants to custom expand this node.
@@ -1567,7 +1567,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
if (VecVT.getScalarSizeInBits() < 8) {
EltVT = MVT::i8;
VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ VecVT.getVectorElementCount());
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
// Extend the element type to match if needed.
if (EltVT.bitsGT(Elt.getValueType()))
@@ -1592,8 +1592,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue EltPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
Store = DAG.getTruncStore(
Store, dl, Elt, EltPtr, MachinePointerInfo::getUnknownStack(MF), EltVT,
- commonAlignment(SmallestAlign,
- EltVT.getFixedSizeInBits() / 8));
+ commonAlignment(SmallestAlign,
+ EltVT.getFixedSizeInBits() / 8));
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
@@ -1602,11 +1602,11 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
Lo = DAG.getLoad(LoVT, dl, Store, StackPtr, PtrInfo, SmallestAlign);
// Increment the pointer to the other part.
- auto Load = cast<LoadSDNode>(Lo);
- MachinePointerInfo MPI = Load->getPointerInfo();
- IncrementPointer(Load, LoVT, MPI, StackPtr);
+ auto Load = cast<LoadSDNode>(Lo);
+ MachinePointerInfo MPI = Load->getPointerInfo();
+ IncrementPointer(Load, LoVT, MPI, StackPtr);
- Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
+ Hi = DAG.getLoad(HiVT, dl, Store, StackPtr, MPI, SmallestAlign);
// If we adjusted the original type, we need to truncate the results.
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
@@ -1616,18 +1616,18 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(ISD::TRUNCATE, dl, HiVT, Hi);
}
-void DAGTypeLegalizer::SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
+void DAGTypeLegalizer::SplitVecRes_ScalarOp(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
EVT LoVT, HiVT;
SDLoc dl(N);
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
- Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));
- if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
- Hi = DAG.getUNDEF(HiVT);
- } else {
- assert(N->getOpcode() == ISD::SPLAT_VECTOR && "Unexpected opcode");
- Hi = Lo;
- }
+ Lo = DAG.getNode(N->getOpcode(), dl, LoVT, N->getOperand(0));
+ if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ Hi = DAG.getUNDEF(HiVT);
+ } else {
+ assert(N->getOpcode() == ISD::SPLAT_VECTOR && "Unexpected opcode");
+ Hi = Lo;
+ }
}
void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
@@ -1715,10 +1715,10 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
else
std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
- unsigned LoSize = MemoryLocation::getSizeOrUnknown(LoMemVT.getStoreSize());
+ unsigned LoSize = MemoryLocation::getSizeOrUnknown(LoMemVT.getStoreSize());
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- MLD->getPointerInfo(), MachineMemOperand::MOLoad, LoSize, Alignment,
- MLD->getAAInfo(), MLD->getRanges());
+ MLD->getPointerInfo(), MachineMemOperand::MOLoad, LoSize, Alignment,
+ MLD->getAAInfo(), MLD->getRanges());
Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
MMO, MLD->getAddressingMode(), ExtType,
@@ -1732,18 +1732,18 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
// Generate hi masked load.
Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG,
MLD->isExpandingLoad());
- unsigned HiSize = MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize());
-
- MachinePointerInfo MPI;
- if (LoMemVT.isScalableVector())
- MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace());
- else
- MPI = MLD->getPointerInfo().getWithOffset(
- LoMemVT.getStoreSize().getFixedSize());
-
+ unsigned HiSize = MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize());
+
+ MachinePointerInfo MPI;
+ if (LoMemVT.isScalableVector())
+ MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace());
+ else
+ MPI = MLD->getPointerInfo().getWithOffset(
+ LoMemVT.getStoreSize().getFixedSize());
+
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOLoad, HiSize, Alignment, MLD->getAAInfo(),
- MLD->getRanges());
+ MPI, MachineMemOperand::MOLoad, HiSize, Alignment, MLD->getAAInfo(),
+ MLD->getRanges());
Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
@@ -1773,9 +1773,9 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
SDValue PassThru = MGT->getPassThru();
SDValue Index = MGT->getIndex();
SDValue Scale = MGT->getScale();
- EVT MemoryVT = MGT->getMemoryVT();
+ EVT MemoryVT = MGT->getMemoryVT();
Align Alignment = MGT->getOriginalAlign();
- ISD::LoadExtType ExtType = MGT->getExtensionType();
+ ISD::LoadExtType ExtType = MGT->getExtensionType();
// Split Mask operand
SDValue MaskLo, MaskHi;
@@ -1788,10 +1788,10 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
std::tie(MaskLo, MaskHi) = DAG.SplitVector(Mask, dl);
}
- EVT LoMemVT, HiMemVT;
- // Split MemoryVT
- std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
-
+ EVT LoMemVT, HiMemVT;
+ // Split MemoryVT
+ std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
+
SDValue PassThruLo, PassThruHi;
if (getTypeAction(PassThru.getValueType()) == TargetLowering::TypeSplitVector)
GetSplitVector(PassThru, PassThruLo, PassThruHi);
@@ -1810,12 +1810,12 @@ void DAGTypeLegalizer::SplitVecRes_MGATHER(MaskedGatherSDNode *MGT,
MGT->getRanges());
SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale};
- Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo,
- MMO, MGT->getIndexType(), ExtType);
+ Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl, OpsLo,
+ MMO, MGT->getIndexType(), ExtType);
SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Scale};
- Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi,
- MMO, MGT->getIndexType(), ExtType);
+ Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl, OpsHi,
+ MMO, MGT->getIndexType(), ExtType);
// Build a factor node to remember that this load is independent of the
// other one.
@@ -1903,8 +1903,8 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
// more effectively move in the right direction and prevent falling down
// to scalarization in many cases due to the input vector being split too
// far.
- if (SrcVT.getVectorElementCount().isKnownEven() &&
- SrcVT.getScalarSizeInBits() * 2 < DestVT.getScalarSizeInBits()) {
+ if (SrcVT.getVectorElementCount().isKnownEven() &&
+ SrcVT.getScalarSizeInBits() * 2 < DestVT.getScalarSizeInBits()) {
LLVMContext &Ctx = *DAG.getContext();
EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx);
EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx);
@@ -2059,23 +2059,23 @@ void DAGTypeLegalizer::SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
ReplaceValueWith(SDValue(N, 1), Chain);
}
-void DAGTypeLegalizer::SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
- EVT DstVTLo, DstVTHi;
- std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
- SDLoc dl(N);
-
- SDValue SrcLo, SrcHi;
- EVT SrcVT = N->getOperand(0).getValueType();
- if (getTypeAction(SrcVT) == TargetLowering::TypeSplitVector)
- GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
- else
- std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
-
- Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
- Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));
-}
-
+void DAGTypeLegalizer::SplitVecRes_FP_TO_XINT_SAT(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ EVT DstVTLo, DstVTHi;
+ std::tie(DstVTLo, DstVTHi) = DAG.GetSplitDestVTs(N->getValueType(0));
+ SDLoc dl(N);
+
+ SDValue SrcLo, SrcHi;
+ EVT SrcVT = N->getOperand(0).getValueType();
+ if (getTypeAction(SrcVT) == TargetLowering::TypeSplitVector)
+ GetSplitVector(N->getOperand(0), SrcLo, SrcHi);
+ else
+ std::tie(SrcLo, SrcHi) = DAG.SplitVectorOperand(N, 0);
+
+ Lo = DAG.getNode(N->getOpcode(), dl, DstVTLo, SrcLo, N->getOperand(1));
+ Hi = DAG.getNode(N->getOpcode(), dl, DstVTHi, SrcHi, N->getOperand(1));
+}
+
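
SplitVecRes_FP_TO_XINT_SAT above is result splitting: the saturating conversion's result type is illegal, so both the destination type and the source vector are halved and the node is re-emitted per half, reusing the same saturation-width operand. Modeled in standalone C++ over std::vector (SatConvert stands in for the per-element node semantics):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

std::pair<std::vector<int32_t>, std::vector<int32_t>>
splitResultFpToSiSat(const std::vector<float> &Src,
                     int32_t (*SatConvert)(float)) {
  size_t Half = Src.size() / 2; // GetSplitDestVTs
  std::vector<int32_t> Lo, Hi;
  for (size_t I = 0; I != Half; ++I)
    Lo.push_back(SatConvert(Src[I]));
  for (size_t I = Half; I != Src.size(); ++I)
    Hi.push_back(SatConvert(Src[I]));
  return {Lo, Hi}; // the Lo/Hi results stay split; no concat needed here
}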
//===----------------------------------------------------------------------===//
// Operand Vector Splitting
//===----------------------------------------------------------------------===//
@@ -2092,95 +2092,95 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
return false;
- switch (N->getOpcode()) {
- default:
+ switch (N->getOpcode()) {
+ default:
#ifndef NDEBUG
- dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n";
+ dbgs() << "SplitVectorOperand Op #" << OpNo << ": ";
+ N->dump(&DAG);
+ dbgs() << "\n";
#endif
- report_fatal_error("Do not know how to split this operator's "
- "operand!\n");
-
- case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
- case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
- case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
- case ISD::INSERT_SUBVECTOR: Res = SplitVecOp_INSERT_SUBVECTOR(N, OpNo); break;
- case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
- case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
- case ISD::TRUNCATE:
- Res = SplitVecOp_TruncateHelper(N);
- break;
- case ISD::STRICT_FP_ROUND:
- case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
- case ISD::FCOPYSIGN: Res = SplitVecOp_FCOPYSIGN(N); break;
- case ISD::STORE:
- Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
- break;
- case ISD::MSTORE:
- Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
- break;
- case ISD::MSCATTER:
- Res = SplitVecOp_MSCATTER(cast<MaskedScatterSDNode>(N), OpNo);
- break;
- case ISD::MGATHER:
- Res = SplitVecOp_MGATHER(cast<MaskedGatherSDNode>(N), OpNo);
- break;
- case ISD::VSELECT:
- Res = SplitVecOp_VSELECT(N, OpNo);
- break;
- case ISD::STRICT_SINT_TO_FP:
- case ISD::STRICT_UINT_TO_FP:
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- if (N->getValueType(0).bitsLT(
- N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
+ report_fatal_error("Do not know how to split this operator's "
+ "operand!\n");
+
+ case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
+ case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
+ case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
+ case ISD::INSERT_SUBVECTOR: Res = SplitVecOp_INSERT_SUBVECTOR(N, OpNo); break;
+ case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
+ case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
+ case ISD::TRUNCATE:
+ Res = SplitVecOp_TruncateHelper(N);
+ break;
+ case ISD::STRICT_FP_ROUND:
+ case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
+ case ISD::FCOPYSIGN: Res = SplitVecOp_FCOPYSIGN(N); break;
+ case ISD::STORE:
+ Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
+ break;
+ case ISD::MSTORE:
+ Res = SplitVecOp_MSTORE(cast<MaskedStoreSDNode>(N), OpNo);
+ break;
+ case ISD::MSCATTER:
+ Res = SplitVecOp_MSCATTER(cast<MaskedScatterSDNode>(N), OpNo);
+ break;
+ case ISD::MGATHER:
+ Res = SplitVecOp_MGATHER(cast<MaskedGatherSDNode>(N), OpNo);
+ break;
+ case ISD::VSELECT:
+ Res = SplitVecOp_VSELECT(N, OpNo);
+ break;
+ case ISD::STRICT_SINT_TO_FP:
+ case ISD::STRICT_UINT_TO_FP:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ if (N->getValueType(0).bitsLT(
+ N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType()))
Res = SplitVecOp_TruncateHelper(N);
- else
+ else
Res = SplitVecOp_UnaryOp(N);
- break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = SplitVecOp_FP_TO_XINT_SAT(N);
- break;
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::STRICT_FP_TO_SINT:
- case ISD::STRICT_FP_TO_UINT:
- case ISD::STRICT_FP_EXTEND:
- case ISD::FP_EXTEND:
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND:
- case ISD::FTRUNC:
- Res = SplitVecOp_UnaryOp(N);
- break;
-
- case ISD::ANY_EXTEND_VECTOR_INREG:
- case ISD::SIGN_EXTEND_VECTOR_INREG:
- case ISD::ZERO_EXTEND_VECTOR_INREG:
- Res = SplitVecOp_ExtVecInRegOp(N);
- break;
-
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_ADD:
- case ISD::VECREDUCE_MUL:
- case ISD::VECREDUCE_AND:
- case ISD::VECREDUCE_OR:
- case ISD::VECREDUCE_XOR:
- case ISD::VECREDUCE_SMAX:
- case ISD::VECREDUCE_SMIN:
- case ISD::VECREDUCE_UMAX:
- case ISD::VECREDUCE_UMIN:
- case ISD::VECREDUCE_FMAX:
- case ISD::VECREDUCE_FMIN:
- Res = SplitVecOp_VECREDUCE(N, OpNo);
- break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- Res = SplitVecOp_VECREDUCE_SEQ(N);
- break;
+ break;
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = SplitVecOp_FP_TO_XINT_SAT(N);
+ break;
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::STRICT_FP_TO_UINT:
+ case ISD::STRICT_FP_EXTEND:
+ case ISD::FP_EXTEND:
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ case ISD::FTRUNC:
+ Res = SplitVecOp_UnaryOp(N);
+ break;
+
+ case ISD::ANY_EXTEND_VECTOR_INREG:
+ case ISD::SIGN_EXTEND_VECTOR_INREG:
+ case ISD::ZERO_EXTEND_VECTOR_INREG:
+ Res = SplitVecOp_ExtVecInRegOp(N);
+ break;
+
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_MUL:
+ case ISD::VECREDUCE_AND:
+ case ISD::VECREDUCE_OR:
+ case ISD::VECREDUCE_XOR:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_UMIN:
+ case ISD::VECREDUCE_FMAX:
+ case ISD::VECREDUCE_FMIN:
+ Res = SplitVecOp_VECREDUCE(N, OpNo);
+ break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ Res = SplitVecOp_VECREDUCE_SEQ(N);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -2250,33 +2250,33 @@ SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
// Use the appropriate scalar instruction on the split subvectors before
// reducing the now partially reduced smaller vector.
- unsigned CombineOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
+ unsigned CombineOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
SDValue Partial = DAG.getNode(CombineOpc, dl, LoOpVT, Lo, Hi, N->getFlags());
return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());
}
-SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) {
- EVT ResVT = N->getValueType(0);
- SDValue Lo, Hi;
- SDLoc dl(N);
-
- SDValue AccOp = N->getOperand(0);
- SDValue VecOp = N->getOperand(1);
- SDNodeFlags Flags = N->getFlags();
-
- EVT VecVT = VecOp.getValueType();
- assert(VecVT.isVector() && "Can only split reduce vector operand");
- GetSplitVector(VecOp, Lo, Hi);
- EVT LoOpVT, HiOpVT;
- std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
-
- // Reduce low half.
- SDValue Partial = DAG.getNode(N->getOpcode(), dl, ResVT, AccOp, Lo, Flags);
-
- // Reduce high half, using low half result as initial value.
- return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
-}
-
+SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) {
+ EVT ResVT = N->getValueType(0);
+ SDValue Lo, Hi;
+ SDLoc dl(N);
+
+ SDValue AccOp = N->getOperand(0);
+ SDValue VecOp = N->getOperand(1);
+ SDNodeFlags Flags = N->getFlags();
+
+ EVT VecVT = VecOp.getValueType();
+ assert(VecVT.isVector() && "Can only split reduce vector operand");
+ GetSplitVector(VecOp, Lo, Hi);
+ EVT LoOpVT, HiOpVT;
+ std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
+
+ // Reduce low half.
+ SDValue Partial = DAG.getNode(N->getOpcode(), dl, ResVT, AccOp, Lo, Flags);
+
+ // Reduce high half, using low half result as initial value.
+ return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
+}
+
SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
// The result has a legal vector type, but the input needs splitting.
EVT ResVT = N->getValueType(0);
@@ -2326,36 +2326,36 @@ SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
JoinIntegers(Lo, Hi));
}
-SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
- unsigned OpNo) {
- assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
- // We know that the result type is legal.
- EVT ResVT = N->getValueType(0);
-
- SDValue Vec = N->getOperand(0);
- SDValue SubVec = N->getOperand(1);
- SDValue Idx = N->getOperand(2);
- SDLoc dl(N);
-
- SDValue Lo, Hi;
- GetSplitVector(SubVec, Lo, Hi);
-
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
-
- SDValue FirstInsertion =
- DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Lo, Idx);
- SDValue SecondInsertion =
- DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, FirstInsertion, Hi,
- DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
-
- return SecondInsertion;
-}
-
+SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
+ unsigned OpNo) {
+ assert(OpNo == 1 && "Invalid OpNo; can only split SubVec.");
+ // We know that the result type is legal.
+ EVT ResVT = N->getValueType(0);
+
+ SDValue Vec = N->getOperand(0);
+ SDValue SubVec = N->getOperand(1);
+ SDValue Idx = N->getOperand(2);
+ SDLoc dl(N);
+
+ SDValue Lo, Hi;
+ GetSplitVector(SubVec, Lo, Hi);
+
+ uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
+
+ SDValue FirstInsertion =
+ DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Lo, Idx);
+ SDValue SecondInsertion =
+ DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, FirstInsertion, Hi,
+ DAG.getVectorIdxConstant(IdxVal + LoElts, dl));
+
+ return SecondInsertion;
+}
+
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
// We know that the extracted result type is legal.
EVT SubVT = N->getValueType(0);
-
+
SDValue Idx = N->getOperand(1);
SDLoc dl(N);
SDValue Lo, Hi;
@@ -2391,14 +2391,14 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue Lo, Hi;
GetSplitVector(Vec, Lo, Hi);
- uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
+ uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
if (IdxVal < LoElts)
return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
- else if (!Vec.getValueType().isScalableVector())
- return SDValue(DAG.UpdateNodeOperands(N, Hi,
- DAG.getConstant(IdxVal - LoElts, SDLoc(N),
- Idx.getValueType())), 0);
+ else if (!Vec.getValueType().isScalableVector())
+ return SDValue(DAG.UpdateNodeOperands(N, Hi,
+ DAG.getConstant(IdxVal - LoElts, SDLoc(N),
+ Idx.getValueType())), 0);
}
// See if the target wants to custom expand this node.
@@ -2411,7 +2411,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
if (VecVT.getScalarSizeInBits() < 8) {
EltVT = MVT::i8;
VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ VecVT.getVectorElementCount());
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
}
@@ -2441,7 +2441,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
return DAG.getExtLoad(
ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), EltVT,
- commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
+ commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
}
SDValue DAGTypeLegalizer::SplitVecOp_ExtVecInRegOp(SDNode *N) {
@@ -2467,7 +2467,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_MGATHER(MaskedGatherSDNode *MGT,
SDValue Mask = MGT->getMask();
SDValue PassThru = MGT->getPassThru();
Align Alignment = MGT->getOriginalAlign();
- ISD::LoadExtType ExtType = MGT->getExtensionType();
+ ISD::LoadExtType ExtType = MGT->getExtensionType();
SDValue MaskLo, MaskHi;
if (getTypeAction(Mask.getValueType()) == TargetLowering::TypeSplitVector)
@@ -2498,12 +2498,12 @@ SDValue DAGTypeLegalizer::SplitVecOp_MGATHER(MaskedGatherSDNode *MGT,
MGT->getRanges());
SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale};
- SDValue Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl,
- OpsLo, MMO, MGT->getIndexType(), ExtType);
+ SDValue Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoMemVT, dl,
+ OpsLo, MMO, MGT->getIndexType(), ExtType);
SDValue OpsHi[] = {Ch, PassThruHi, MaskHi, Ptr, IndexHi, Scale};
- SDValue Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl,
- OpsHi, MMO, MGT->getIndexType(), ExtType);
+ SDValue Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiMemVT, dl,
+ OpsHi, MMO, MGT->getIndexType(), ExtType);
// Build a factor node to remember that this load is independent of the
// other one.
@@ -2557,10 +2557,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
DAG.GetDependentSplitDestVTs(MemoryVT, DataLo.getValueType(), &HiIsEmpty);
SDValue Lo, Hi, Res;
- unsigned LoSize = MemoryLocation::getSizeOrUnknown(LoMemVT.getStoreSize());
+ unsigned LoSize = MemoryLocation::getSizeOrUnknown(LoMemVT.getStoreSize());
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
- N->getPointerInfo(), MachineMemOperand::MOStore, LoSize, Alignment,
- N->getAAInfo(), N->getRanges());
+ N->getPointerInfo(), MachineMemOperand::MOStore, LoSize, Alignment,
+ N->getAAInfo(), N->getRanges());
Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -2575,19 +2575,19 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
N->isCompressingStore());
- MachinePointerInfo MPI;
- if (LoMemVT.isScalableVector()) {
- Alignment = commonAlignment(
- Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
- MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
- } else
- MPI = N->getPointerInfo().getWithOffset(
- LoMemVT.getStoreSize().getFixedSize());
-
- unsigned HiSize = MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize());
+ MachinePointerInfo MPI;
+ if (LoMemVT.isScalableVector()) {
+ Alignment = commonAlignment(
+ Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+ MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
+ } else
+ MPI = N->getPointerInfo().getWithOffset(
+ LoMemVT.getStoreSize().getFixedSize());
+
+ unsigned HiSize = MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize());
MMO = DAG.getMachineFunction().getMachineMemOperand(
- MPI, MachineMemOperand::MOStore, HiSize, Alignment, N->getAAInfo(),
- N->getRanges());
+ MPI, MachineMemOperand::MOStore, HiSize, Alignment, N->getAAInfo(),
+ N->getRanges());
Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
N->getAddressingMode(), N->isTruncatingStore(),
@@ -2609,15 +2609,15 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N,
SDValue Index = N->getIndex();
SDValue Scale = N->getScale();
SDValue Data = N->getValue();
- EVT MemoryVT = N->getMemoryVT();
+ EVT MemoryVT = N->getMemoryVT();
Align Alignment = N->getOriginalAlign();
SDLoc DL(N);
// Split all operands
- EVT LoMemVT, HiMemVT;
- std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
-
+ EVT LoMemVT, HiMemVT;
+ std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
+
SDValue DataLo, DataHi;
if (getTypeAction(Data.getValueType()) == TargetLowering::TypeSplitVector)
// Split Data operand
@@ -2648,17 +2648,17 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSCATTER(MaskedScatterSDNode *N,
MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Scale};
- Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
- DL, OpsLo, MMO, N->getIndexType(),
- N->isTruncatingStore());
+ Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), LoMemVT,
+ DL, OpsLo, MMO, N->getIndexType(),
+ N->isTruncatingStore());
// The order of the Scatter operation after split is well defined. The "Hi"
// part comes after the "Lo". So these two operations should be chained one
// after another.
SDValue OpsHi[] = {Lo, DataHi, MaskHi, Ptr, IndexHi, Scale};
- return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
- DL, OpsHi, MMO, N->getIndexType(),
- N->isTruncatingStore());
+ return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), HiMemVT,
+ DL, OpsHi, MMO, N->getIndexType(),
+ N->isTruncatingStore());
}
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
@@ -2784,7 +2784,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
EVT::getFloatingPointVT(InElementSize/2) :
EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
- NumElements.divideCoefficientBy(2));
+ NumElements.divideCoefficientBy(2));
SDValue HalfLo;
SDValue HalfHi;
@@ -2863,7 +2863,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
EVT InVT = Lo.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
- InVT.getVectorElementCount());
+ InVT.getVectorElementCount());
if (N->isStrictFPOpcode()) {
Lo = DAG.getNode(N->getOpcode(), DL, { OutVT, MVT::Other },
@@ -2889,23 +2889,23 @@ SDValue DAGTypeLegalizer::SplitVecOp_FCOPYSIGN(SDNode *N) {
return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
}
-SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {
- EVT ResVT = N->getValueType(0);
- SDValue Lo, Hi;
- SDLoc dl(N);
- GetSplitVector(N->getOperand(0), Lo, Hi);
- EVT InVT = Lo.getValueType();
-
- EVT NewResVT =
- EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
- InVT.getVectorElementCount());
-
- Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1));
- Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1));
-
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
-}
-
+SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {
+ EVT ResVT = N->getValueType(0);
+ SDValue Lo, Hi;
+ SDLoc dl(N);
+ GetSplitVector(N->getOperand(0), Lo, Hi);
+ EVT InVT = Lo.getValueType();
+
+ EVT NewResVT =
+ EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
+ InVT.getVectorElementCount());
+
+ Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, Lo, N->getOperand(1));
+ Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, Hi, N->getOperand(1));
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
+}
+
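
SplitVecOp_FP_TO_XINT_SAT above is the mirror image of the result-splitting case: here the result type is legal but the operand must be split, so each half is converted at a narrower vector type and the halves are joined back with CONCAT_VECTORS. Standalone C++ sketch (illustrative names):

#include <cstdint>
#include <vector>

std::vector<int32_t> splitOperandFpToSiSat(const std::vector<float> &LoHalf,
                                           const std::vector<float> &HiHalf,
                                           int32_t (*SatConvert)(float)) {
  std::vector<int32_t> Res; // CONCAT_VECTORS of the converted halves
  for (float F : LoHalf)
    Res.push_back(SatConvert(F));
  for (float F : HiHalf)
    Res.push_back(SatConvert(F));
  return Res;
}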
//===----------------------------------------------------------------------===//
// Result Vector Widening
//===----------------------------------------------------------------------===//
@@ -2935,10 +2935,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break;
case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break;
case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
- case ISD::SPLAT_VECTOR:
- case ISD::SCALAR_TO_VECTOR:
- Res = WidenVecRes_ScalarOp(N);
- break;
+ case ISD::SPLAT_VECTOR:
+ case ISD::SCALAR_TO_VECTOR:
+ Res = WidenVecRes_ScalarOp(N);
+ break;
case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
case ISD::VSELECT:
case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
@@ -2963,9 +2963,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::OR:
case ISD::SUB:
case ISD::XOR:
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL:
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
case ISD::FMINNUM:
case ISD::FMAXNUM:
case ISD::FMINIMUM:
@@ -2978,10 +2978,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SADDSAT:
case ISD::USUBSAT:
case ISD::SSUBSAT:
- case ISD::SSHLSAT:
- case ISD::USHLSAT:
- case ISD::ROTL:
- case ISD::ROTR:
+ case ISD::SSHLSAT:
+ case ISD::USHLSAT:
+ case ISD::ROTL:
+ case ISD::ROTR:
Res = WidenVecRes_Binary(N);
break;
@@ -3049,11 +3049,11 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
Res = WidenVecRes_Convert(N);
break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = WidenVecRes_FP_TO_XINT_SAT(N);
- break;
-
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = WidenVecRes_FP_TO_XINT_SAT(N);
+ break;
+
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
@@ -3101,8 +3101,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
Res = WidenVecRes_Unary(N);
break;
case ISD::FMA:
- case ISD::FSHL:
- case ISD::FSHR:
+ case ISD::FSHL:
+ case ISD::FSHR:
Res = WidenVecRes_Ternary(N);
break;
}
@@ -3468,34 +3468,34 @@ SDValue DAGTypeLegalizer::WidenVecRes_OverflowOp(SDNode *N, unsigned ResNo) {
}
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
- LLVMContext &Ctx = *DAG.getContext();
+ LLVMContext &Ctx = *DAG.getContext();
SDValue InOp = N->getOperand(0);
SDLoc DL(N);
- EVT WidenVT = TLI.getTypeToTransformTo(Ctx, N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(Ctx, N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
EVT InVT = InOp.getValueType();
unsigned Opcode = N->getOpcode();
- const SDNodeFlags Flags = N->getFlags();
-
- // Handle the case of ZERO_EXTEND where the promoted InVT element size does
- // not equal that of WidenVT.
- if (N->getOpcode() == ISD::ZERO_EXTEND &&
- getTypeAction(InVT) == TargetLowering::TypePromoteInteger &&
- TLI.getTypeToTransformTo(Ctx, InVT).getScalarSizeInBits() !=
- WidenVT.getScalarSizeInBits()) {
- InOp = ZExtPromotedInteger(InOp);
- InVT = InOp.getValueType();
- if (WidenVT.getScalarSizeInBits() < InVT.getScalarSizeInBits())
- Opcode = ISD::TRUNCATE;
- }
-
- EVT InEltVT = InVT.getVectorElementType();
- EVT InWidenVT = EVT::getVectorVT(Ctx, InEltVT, WidenNumElts);
+ const SDNodeFlags Flags = N->getFlags();
+
+ // Handle the case of ZERO_EXTEND where the promoted InVT element size does
+ // not equal that of WidenVT.
+ if (N->getOpcode() == ISD::ZERO_EXTEND &&
+ getTypeAction(InVT) == TargetLowering::TypePromoteInteger &&
+ TLI.getTypeToTransformTo(Ctx, InVT).getScalarSizeInBits() !=
+ WidenVT.getScalarSizeInBits()) {
+ InOp = ZExtPromotedInteger(InOp);
+ InVT = InOp.getValueType();
+ if (WidenVT.getScalarSizeInBits() < InVT.getScalarSizeInBits())
+ Opcode = ISD::TRUNCATE;
+ }
+
+ EVT InEltVT = InVT.getVectorElementType();
+ EVT InWidenVT = EVT::getVectorVT(Ctx, InEltVT, WidenNumElts);
unsigned InVTNumElts = InVT.getVectorNumElements();
-
+
if (getTypeAction(InVT) == TargetLowering::TypeWidenVector) {
InOp = GetWidenedVector(N->getOperand(0));
InVT = InOp.getValueType();
@@ -3563,27 +3563,27 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
return DAG.getBuildVector(WidenVT, DL, Ops);
}
-SDValue DAGTypeLegalizer::WidenVecRes_FP_TO_XINT_SAT(SDNode *N) {
- SDLoc dl(N);
- EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- ElementCount WidenNumElts = WidenVT.getVectorElementCount();
-
- SDValue Src = N->getOperand(0);
- EVT SrcVT = Src.getValueType();
-
- // Also widen the input.
- if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) {
- Src = GetWidenedVector(Src);
- SrcVT = Src.getValueType();
- }
-
- // Input and output not widened to the same size, give up.
- if (WidenNumElts != SrcVT.getVectorElementCount())
- return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue());
-
- return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1));
-}
-
+SDValue DAGTypeLegalizer::WidenVecRes_FP_TO_XINT_SAT(SDNode *N) {
+ SDLoc dl(N);
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ ElementCount WidenNumElts = WidenVT.getVectorElementCount();
+
+ SDValue Src = N->getOperand(0);
+ EVT SrcVT = Src.getValueType();
+
+ // Also widen the input.
+ if (getTypeAction(SrcVT) == TargetLowering::TypeWidenVector) {
+ Src = GetWidenedVector(Src);
+ SrcVT = Src.getValueType();
+ }
+
+ // Input and output not widened to the same size, give up.
+ if (WidenNumElts != SrcVT.getVectorElementCount())
+ return DAG.UnrollVectorOp(N, WidenNumElts.getKnownMinValue());
+
+ return DAG.getNode(N->getOpcode(), dl, WidenVT, Src, N->getOperand(1));
+}
+
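WidenVecRes_FP_TO_XINT_SAT above widens the saturating float-to-int conversions. As a reminder of what the operation itself computes, here is a minimal scalar sketch of the saturating semantics (fptosi_sat32 is a made-up name, not an LLVM API):

#include <cmath>
#include <cstdint>
#include <limits>
#include <cassert>

// Saturating float->int32: out-of-range inputs clamp to the extremes and
// NaN maps to 0, instead of being undefined behavior like a plain cast.
int32_t fptosi_sat32(float X) {
  if (std::isnan(X))
    return 0;
  if (X <= (float)std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  if (X >= (float)std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  return (int32_t)X;
}

int main() {
  assert(fptosi_sat32(1e20f) == INT32_MAX);
  assert(fptosi_sat32(-1e20f) == INT32_MIN);
  assert(fptosi_sat32(42.5f) == 42);
}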
SDValue DAGTypeLegalizer::WidenVecRes_Convert_StrictFP(SDNode *N) {
SDValue InOp = N->getOperand(1);
SDLoc DL(N);
@@ -4044,13 +4044,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_MGATHER(MaskedGatherSDNode *N) {
Index = ModifyToType(Index, WideIndexVT);
SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
Scale };
-
- // Widen the MemoryType
- EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(),
- N->getMemoryVT().getScalarType(), NumElts);
+
+ // Widen the MemoryType
+ EVT WideMemVT = EVT::getVectorVT(*DAG.getContext(),
+ N->getMemoryVT().getScalarType(), NumElts);
SDValue Res = DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other),
- WideMemVT, dl, Ops, N->getMemOperand(),
- N->getIndexType(), N->getExtensionType());
+ WideMemVT, dl, Ops, N->getMemOperand(),
+ N->getIndexType(), N->getExtensionType());
// Legalize the chain result - switch anything that used the old chain to
// use the new one.
@@ -4058,9 +4058,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_MGATHER(MaskedGatherSDNode *N) {
return Res;
}
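WidenVecRes_MGATHER widens the memory type together with the vector type; padded lanes get a false mask bit, so they never touch memory. A reference-semantics sketch of a masked gather, as a plain scalar loop rather than the DAG node:

#include <cstddef>
#include <cassert>

// For each lane, either load Base[Index[i]] (mask set) or keep the
// pass-through value (mask clear). Widened lanes carry a zero mask bit,
// so they simply keep their pass-through value.
void maskedGather(const int *Base, const int *Index, const bool *Mask,
                  const int *PassThru, int *Result, size_t N) {
  for (size_t I = 0; I < N; ++I)
    Result[I] = Mask[I] ? Base[Index[I]] : PassThru[I];
}

int main() {
  int Mem[4] = {10, 20, 30, 40};
  int Idx[4] = {3, 0, 2, 1};
  bool M[4] = {true, false, true, false};
  int PT[4] = {-1, -2, -3, -4};
  int Out[4];
  maskedGather(Mem, Idx, M, PT, Out, 4);
  assert(Out[0] == 40 && Out[1] == -2 && Out[2] == 30 && Out[3] == -4);
}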
-SDValue DAGTypeLegalizer::WidenVecRes_ScalarOp(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecRes_ScalarOp(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
- return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));
+ return DAG.getNode(N->getOpcode(), SDLoc(N), WidenVT, N->getOperand(0));
}
// Return true if this is a SETCC node or a strict version of it.
@@ -4180,11 +4180,11 @@ SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT,
return Mask;
}
-// This method tries to handle some special cases for the vselect mask
-// and, if needed, adjusts the mask vector type to match that of the VSELECT.
-// Without it, many cases end up with scalarization of the SETCC, with many
-// unnecessary instructions.
-SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
+// This method tries to handle some special cases for the vselect mask
+// and, if needed, adjusts the mask vector type to match that of the VSELECT.
+// Without it, many cases end up with scalarization of the SETCC, with many
+// unnecessary instructions.
+SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
LLVMContext &Ctx = *DAG.getContext();
SDValue Cond = N->getOperand(0);
@@ -4231,8 +4231,8 @@ SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
return SDValue();
}
- // Widen the vselect result type if needed.
- if (getTypeAction(VSelVT) == TargetLowering::TypeWidenVector)
+ // Widen the vselect result type if needed.
+ if (getTypeAction(VSelVT) == TargetLowering::TypeWidenVector)
VSelVT = TLI.getTypeToTransformTo(Ctx, VSelVT);
// The mask of the VSELECT should have integer elements.
@@ -4282,7 +4282,7 @@ SDValue DAGTypeLegalizer::WidenVSELECTMask(SDNode *N) {
} else
return SDValue();
- return Mask;
+ return Mask;
}
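The reason WidenVSELECTMask insists the mask lanes match the data lane width: a vector select is ultimately a per-lane bitwise blend, which only works when each mask lane is all-ones or all-zeros at the data width. A standalone sketch of that blend (vselect32 is an invented name, under the assumption of sign-extended masks):

#include <cstdint>
#include <cassert>

// Element-wise select with an integer mask whose lanes match the data
// width: all-ones takes A, all-zeros takes B. A SETCC that produced a
// narrower mask must be sign-extended per lane before feeding this.
void vselect32(const int32_t *MaskLanes, const int32_t *A, const int32_t *B,
               int32_t *R, unsigned N) {
  for (unsigned I = 0; I < N; ++I)
    R[I] = (A[I] & MaskLanes[I]) | (B[I] & ~MaskLanes[I]);
}

int main() {
  int32_t M[4] = {-1, 0, -1, 0}; // -1 == all ones in two's complement
  int32_t A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8}, R[4];
  vselect32(M, A, B, R, 4);
  assert(R[0] == 1 && R[1] == 6 && R[2] == 3 && R[3] == 8);
}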
SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
@@ -4292,13 +4292,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
SDValue Cond1 = N->getOperand(0);
EVT CondVT = Cond1.getValueType();
if (CondVT.isVector()) {
- if (SDValue WideCond = WidenVSELECTMask(N)) {
- SDValue InOp1 = GetWidenedVector(N->getOperand(1));
- SDValue InOp2 = GetWidenedVector(N->getOperand(2));
- assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
- return DAG.getNode(N->getOpcode(), SDLoc(N),
- WidenVT, WideCond, InOp1, InOp2);
- }
+ if (SDValue WideCond = WidenVSELECTMask(N)) {
+ SDValue InOp1 = GetWidenedVector(N->getOperand(1));
+ SDValue InOp2 = GetWidenedVector(N->getOperand(2));
+ assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
+ return DAG.getNode(N->getOpcode(), SDLoc(N),
+ WidenVT, WideCond, InOp1, InOp2);
+ }
EVT CondEltVT = CondVT.getVectorElementType();
EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(),
@@ -4505,11 +4505,11 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
Res = WidenVecOp_Convert(N);
break;
- case ISD::FP_TO_SINT_SAT:
- case ISD::FP_TO_UINT_SAT:
- Res = WidenVecOp_FP_TO_XINT_SAT(N);
- break;
-
+ case ISD::FP_TO_SINT_SAT:
+ case ISD::FP_TO_UINT_SAT:
+ Res = WidenVecOp_FP_TO_XINT_SAT(N);
+ break;
+
case ISD::VECREDUCE_FADD:
case ISD::VECREDUCE_FMUL:
case ISD::VECREDUCE_ADD:
@@ -4525,10 +4525,10 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::VECREDUCE_FMIN:
Res = WidenVecOp_VECREDUCE(N);
break;
- case ISD::VECREDUCE_SEQ_FADD:
- case ISD::VECREDUCE_SEQ_FMUL:
- Res = WidenVecOp_VECREDUCE_SEQ(N);
- break;
+ case ISD::VECREDUCE_SEQ_FADD:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ Res = WidenVecOp_VECREDUCE_SEQ(N);
+ break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -4683,28 +4683,28 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
return DAG.getBuildVector(VT, dl, Ops);
}
-SDValue DAGTypeLegalizer::WidenVecOp_FP_TO_XINT_SAT(SDNode *N) {
- EVT DstVT = N->getValueType(0);
- SDValue Src = GetWidenedVector(N->getOperand(0));
- EVT SrcVT = Src.getValueType();
- ElementCount WideNumElts = SrcVT.getVectorElementCount();
- SDLoc dl(N);
-
- // See if a widened result type would be legal, if so widen the node.
- EVT WideDstVT = EVT::getVectorVT(*DAG.getContext(),
- DstVT.getVectorElementType(), WideNumElts);
- if (TLI.isTypeLegal(WideDstVT)) {
- SDValue Res =
- DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1));
- return DAG.getNode(
- ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
- }
-
- // Give up and unroll.
- return DAG.UnrollVectorOp(N);
-}
-
+SDValue DAGTypeLegalizer::WidenVecOp_FP_TO_XINT_SAT(SDNode *N) {
+ EVT DstVT = N->getValueType(0);
+ SDValue Src = GetWidenedVector(N->getOperand(0));
+ EVT SrcVT = Src.getValueType();
+ ElementCount WideNumElts = SrcVT.getVectorElementCount();
+ SDLoc dl(N);
+
+ // See if a widened result type would be legal, if so widen the node.
+ EVT WideDstVT = EVT::getVectorVT(*DAG.getContext(),
+ DstVT.getVectorElementType(), WideNumElts);
+ if (TLI.isTypeLegal(WideDstVT)) {
+ SDValue Res =
+ DAG.getNode(N->getOpcode(), dl, WideDstVT, Src, N->getOperand(1));
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ }
+
+ // Give up and unroll.
+ return DAG.UnrollVectorOp(N);
+}
+
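WidenVecOp_FP_TO_XINT_SAT above shows the general widening-operand pattern: run the operation at the wider width if that type is legal, then take the low lanes back out with EXTRACT_SUBVECTOR at index 0; otherwise fall back to unrolling. A toy version of that shape, with computeWideThenExtract as a hypothetical stand-in for the real node:

#include <array>
#include <cassert>

// Compute at the width the type system forced on us, then keep only the
// low DstN lanes. Lanes beyond DstN hold don't-care values that the wide
// op computes for free.
template <unsigned WideN, unsigned DstN>
std::array<int, DstN> computeWideThenExtract(const std::array<int, WideN> &In) {
  static_assert(DstN <= WideN, "destination must be a prefix of the wide op");
  std::array<int, WideN> Wide{};
  for (unsigned I = 0; I < WideN; ++I)
    Wide[I] = In[I] * 2;               // stand-in for the real operation
  std::array<int, DstN> Out{};
  for (unsigned I = 0; I < DstN; ++I)  // EXTRACT_SUBVECTOR at index 0
    Out[I] = Wide[I];
  return Out;
}

int main() {
  std::array<int, 4> In{1, 2, 3, 7};   // lane 3 is widened padding
  auto Out = computeWideThenExtract<4, 3>(In);
  assert(Out[0] == 2 && Out[1] == 4 && Out[2] == 6);
}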
SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0);
SDValue InOp = GetWidenedVector(N->getOperand(0));
@@ -4806,11 +4806,11 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
return TLI.scalarizeVectorStore(ST, DAG);
if (ST->isTruncatingStore())
- return TLI.scalarizeVectorStore(ST, DAG);
-
- SmallVector<SDValue, 16> StChain;
- GenWidenVectorStores(StChain, ST);
+ return TLI.scalarizeVectorStore(ST, DAG);
+ SmallVector<SDValue, 16> StChain;
+ GenWidenVectorStores(StChain, ST);
+
if (StChain.size() == 1)
return StChain[0];
else
@@ -4871,8 +4871,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_MGATHER(SDNode *N, unsigned OpNo) {
SDValue Ops[] = {MG->getChain(), DataOp, Mask, MG->getBasePtr(), Index,
Scale};
SDValue Res = DAG.getMaskedGather(MG->getVTList(), MG->getMemoryVT(), dl, Ops,
- MG->getMemOperand(), MG->getIndexType(),
- MG->getExtensionType());
+ MG->getMemOperand(), MG->getIndexType(),
+ MG->getExtensionType());
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
ReplaceValueWith(SDValue(N, 0), Res.getValue(0));
return SDValue();
@@ -4884,7 +4884,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
SDValue Mask = MSC->getMask();
SDValue Index = MSC->getIndex();
SDValue Scale = MSC->getScale();
- EVT WideMemVT = MSC->getMemoryVT();
+ EVT WideMemVT = MSC->getMemoryVT();
if (OpNo == 1) {
DataOp = GetWidenedVector(DataOp);
@@ -4901,10 +4901,10 @@ SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(),
MaskVT.getVectorElementType(), NumElts);
Mask = ModifyToType(Mask, WideMaskVT, true);
-
- // Widen the MemoryType
- WideMemVT = EVT::getVectorVT(*DAG.getContext(),
- MSC->getMemoryVT().getScalarType(), NumElts);
+
+ // Widen the MemoryType
+ WideMemVT = EVT::getVectorVT(*DAG.getContext(),
+ MSC->getMemoryVT().getScalarType(), NumElts);
} else if (OpNo == 4) {
// Just widen the index. It's allowed to have extra elements.
Index = GetWidenedVector(Index);
@@ -4913,9 +4913,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
SDValue Ops[] = {MSC->getChain(), DataOp, Mask, MSC->getBasePtr(), Index,
Scale};
- return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N),
- Ops, MSC->getMemOperand(), MSC->getIndexType(),
- MSC->isTruncatingStore());
+ return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), WideMemVT, SDLoc(N),
+ Ops, MSC->getMemOperand(), MSC->getIndexType(),
+ MSC->isTruncatingStore());
}
SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
@@ -4994,37 +4994,12 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
EVT OrigVT = N->getOperand(0).getValueType();
EVT WideVT = Op.getValueType();
EVT ElemVT = OrigVT.getVectorElementType();
- SDNodeFlags Flags = N->getFlags();
-
- unsigned Opc = N->getOpcode();
- unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
- SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
- assert(NeutralElem && "Neutral element must exist");
-
- // Pad the vector with the neutral element.
- unsigned OrigElts = OrigVT.getVectorNumElements();
- unsigned WideElts = WideVT.getVectorNumElements();
- for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
- Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
- DAG.getVectorIdxConstant(Idx, dl));
-
- return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags);
-}
-
-SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
- SDLoc dl(N);
- SDValue AccOp = N->getOperand(0);
- SDValue VecOp = N->getOperand(1);
- SDValue Op = GetWidenedVector(VecOp);
-
- EVT OrigVT = VecOp.getValueType();
- EVT WideVT = Op.getValueType();
- EVT ElemVT = OrigVT.getVectorElementType();
- SDNodeFlags Flags = N->getFlags();
+ SDNodeFlags Flags = N->getFlags();
- unsigned Opc = N->getOpcode();
- unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
- SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
+ unsigned Opc = N->getOpcode();
+ unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
+ SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
+ assert(NeutralElem && "Neutral element must exist");
// Pad the vector with the neutral element.
unsigned OrigElts = OrigVT.getVectorNumElements();
@@ -5033,9 +5008,34 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
DAG.getVectorIdxConstant(Idx, dl));
- return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
-}
-
+ return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags);
+}
+
+SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
+ SDLoc dl(N);
+ SDValue AccOp = N->getOperand(0);
+ SDValue VecOp = N->getOperand(1);
+ SDValue Op = GetWidenedVector(VecOp);
+
+ EVT OrigVT = VecOp.getValueType();
+ EVT WideVT = Op.getValueType();
+ EVT ElemVT = OrigVT.getVectorElementType();
+ SDNodeFlags Flags = N->getFlags();
+
+ unsigned Opc = N->getOpcode();
+ unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
+ SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
+
+ // Pad the vector with the neutral element.
+ unsigned OrigElts = OrigVT.getVectorNumElements();
+ unsigned WideElts = WideVT.getVectorNumElements();
+ for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
+ Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
+ DAG.getVectorIdxConstant(Idx, dl));
+
+ return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
+}
+
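Both VECREDUCE paths above pad the widened lanes with the operator's neutral element, so the extra lanes cannot change the reduced value (0 for add, 1 for mul, all-ones for and, and so on; that mapping is what getNeutralElement provides). A minimal sketch of why this is sound, for the integer-add case:

#include <vector>
#include <cassert>

// Appending the identity of the reduction operator to the extra widened
// lanes leaves the result unchanged.
int reduceAddWidened(std::vector<int> V, unsigned WideElts) {
  while (V.size() < WideElts)
    V.push_back(0);        // neutral element of integer add
  int Acc = 0;
  for (int X : V)          // reduce over the widened vector
    Acc += X;
  return Acc;
}

int main() {
  // Reducing 3 live lanes inside a widened 8-lane vector gives the same sum.
  assert(reduceAddWidened({1, 2, 3}, 8) == 6);
}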
SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
// This only gets called in the case that the left and right inputs and
// result are of a legal odd vector type, and the condition is illegal i1 of
@@ -5076,7 +5076,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
// If we have one element to load/store, return it.
EVT RetVT = WidenEltVT;
- if (!Scalable && Width == WidenEltWidth)
+ if (!Scalable && Width == WidenEltWidth)
return RetVT;
// See if there is larger legal integer than the element type to load/store.
@@ -5122,14 +5122,14 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
isPowerOf2_32(WidenWidth / MemVTWidth) &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
- if (RetVT.getFixedSizeInBits() < MemVTWidth || MemVT == WidenVT)
+ if (RetVT.getFixedSizeInBits() < MemVTWidth || MemVT == WidenVT)
return MemVT;
}
}
- if (Scalable)
- report_fatal_error("Using element-wise loads and stores for widening "
- "operations is not supported for scalable vectors");
+ if (Scalable)
+ report_fatal_error("Using element-wise loads and stores for widening "
+ "operations is not supported for scalable vectors");
return RetVT;
}
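FindMemType searches for the widest legal type that can carry the remaining load/store width. A greatly simplified sketch of that greedy choice, assuming power-of-two widths that are multiples of 8 bits (pickChunkBits is an invented helper, not the LLVM function):

#include <cassert>

// Pick the widest power-of-two chunk (in bits) that still fits the
// remaining width, so a 96-bit access becomes 64 + 32 rather than twelve
// byte-sized accesses.
unsigned pickChunkBits(unsigned RemainingBits, unsigned MaxBits) {
  unsigned Chunk = MaxBits;
  while (Chunk > 8 && Chunk > RemainingBits)
    Chunk /= 2;            // fall back to the next smaller width
  return Chunk;
}

int main() {
  assert(pickChunkBits(96, 64) == 64);  // first piece
  assert(pickChunkBits(32, 64) == 32);  // second piece
  assert(pickChunkBits(8, 64) == 8);    // smallest unit
}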
@@ -5175,7 +5175,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
EVT LdVT = LD->getMemoryVT();
SDLoc dl(LD);
assert(LdVT.isVector() && WidenVT.isVector());
- assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
+ assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType());
// Load information
@@ -5184,25 +5184,25 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
AAMDNodes AAInfo = LD->getAAInfo();
- TypeSize LdWidth = LdVT.getSizeInBits();
- TypeSize WidenWidth = WidenVT.getSizeInBits();
- TypeSize WidthDiff = WidenWidth - LdWidth;
+ TypeSize LdWidth = LdVT.getSizeInBits();
+ TypeSize WidenWidth = WidenVT.getSizeInBits();
+ TypeSize WidthDiff = WidenWidth - LdWidth;
// Allow wider loads if they are sufficiently aligned to avoid memory faults
// and if the original load is simple.
unsigned LdAlign = (!LD->isSimple()) ? 0 : LD->getAlignment();
// Find the vector type that can load from.
- EVT NewVT = FindMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
- WidthDiff.getKnownMinSize());
- TypeSize NewVTWidth = NewVT.getSizeInBits();
+ EVT NewVT = FindMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
+ WidthDiff.getKnownMinSize());
+ TypeSize NewVTWidth = NewVT.getSizeInBits();
SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, LD->getPointerInfo(),
LD->getOriginalAlign(), MMOFlags, AAInfo);
LdChain.push_back(LdOp.getValue(1));
// Check if we can load the element with one instruction.
- if (TypeSize::isKnownLE(LdWidth, NewVTWidth)) {
+ if (TypeSize::isKnownLE(LdWidth, NewVTWidth)) {
if (!NewVT.isVector()) {
- unsigned NumElts = WidenWidth.getFixedSize() / NewVTWidth.getFixedSize();
+ unsigned NumElts = WidenWidth.getFixedSize() / NewVTWidth.getFixedSize();
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
@@ -5210,9 +5210,9 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
if (NewVT == WidenVT)
return LdOp;
- // TODO: We don't currently have any tests that exercise this code path.
- assert(WidenWidth.getFixedSize() % NewVTWidth.getFixedSize() == 0);
- unsigned NumConcat = WidenWidth.getFixedSize() / NewVTWidth.getFixedSize();
+ // TODO: We don't currently have any tests that exercise this code path.
+ assert(WidenWidth.getFixedSize() % NewVTWidth.getFixedSize() == 0);
+ unsigned NumConcat = WidenWidth.getFixedSize() / NewVTWidth.getFixedSize();
SmallVector<SDValue, 16> ConcatOps(NumConcat);
SDValue UndefVal = DAG.getUNDEF(NewVT);
ConcatOps[0] = LdOp;
@@ -5225,30 +5225,30 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
SmallVector<SDValue, 16> LdOps;
LdOps.push_back(LdOp);
- uint64_t ScaledOffset = 0;
- MachinePointerInfo MPI = LD->getPointerInfo();
- do {
- LdWidth -= NewVTWidth;
- IncrementPointer(cast<LoadSDNode>(LdOp), NewVT, MPI, BasePtr,
- &ScaledOffset);
+ uint64_t ScaledOffset = 0;
+ MachinePointerInfo MPI = LD->getPointerInfo();
+ do {
+ LdWidth -= NewVTWidth;
+ IncrementPointer(cast<LoadSDNode>(LdOp), NewVT, MPI, BasePtr,
+ &ScaledOffset);
- if (TypeSize::isKnownLT(LdWidth, NewVTWidth)) {
+ if (TypeSize::isKnownLT(LdWidth, NewVTWidth)) {
// The current type we are using is too large. Find a better size.
- NewVT = FindMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
- WidthDiff.getKnownMinSize());
+ NewVT = FindMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
+ WidthDiff.getKnownMinSize());
NewVTWidth = NewVT.getSizeInBits();
}
- Align NewAlign = ScaledOffset == 0
- ? LD->getOriginalAlign()
- : commonAlignment(LD->getAlign(), ScaledOffset);
- SDValue L =
- DAG.getLoad(NewVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
- LdChain.push_back(L.getValue(1));
-
+ Align NewAlign = ScaledOffset == 0
+ ? LD->getOriginalAlign()
+ : commonAlignment(LD->getAlign(), ScaledOffset);
+ SDValue L =
+ DAG.getLoad(NewVT, dl, Chain, BasePtr, MPI, NewAlign, MMOFlags, AAInfo);
+ LdChain.push_back(L.getValue(1));
+
LdOps.push_back(L);
LdOp = L;
- } while (TypeSize::isKnownGT(LdWidth, NewVTWidth));
+ } while (TypeSize::isKnownGT(LdWidth, NewVTWidth));
// Build the vector from the load operations.
unsigned End = LdOps.size();
@@ -5272,18 +5272,18 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
}
ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i + 1, End);
}
-
+
ConcatOps[--Idx] = LdOps[i];
for (--i; i >= 0; --i) {
EVT NewLdTy = LdOps[i].getValueType();
if (NewLdTy != LdTy) {
// Create a larger vector.
- TypeSize LdTySize = LdTy.getSizeInBits();
- TypeSize NewLdTySize = NewLdTy.getSizeInBits();
- assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
- NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinSize()));
- unsigned NumOps =
- NewLdTySize.getKnownMinSize() / LdTySize.getKnownMinSize();
+ TypeSize LdTySize = LdTy.getSizeInBits();
+ TypeSize NewLdTySize = NewLdTy.getSizeInBits();
+ assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
+ NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinSize()));
+ unsigned NumOps =
+ NewLdTySize.getKnownMinSize() / LdTySize.getKnownMinSize();
SmallVector<SDValue, 16> WidenOps(NumOps);
unsigned j = 0;
for (; j != End-Idx; ++j)
@@ -5304,8 +5304,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
makeArrayRef(&ConcatOps[Idx], End - Idx));
// We need to fill the rest with undefs to build the vector.
- unsigned NumOps =
- WidenWidth.getKnownMinSize() / LdTy.getSizeInBits().getKnownMinSize();
+ unsigned NumOps =
+ WidenWidth.getKnownMinSize() / LdTy.getSizeInBits().getKnownMinSize();
SmallVector<SDValue, 16> WidenOps(NumOps);
SDValue UndefVal = DAG.getUNDEF(LdTy);
{
@@ -5328,7 +5328,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
EVT LdVT = LD->getMemoryVT();
SDLoc dl(LD);
assert(LdVT.isVector() && WidenVT.isVector());
- assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
+ assert(LdVT.isScalableVector() == WidenVT.isScalableVector());
// Load information
SDValue Chain = LD->getChain();
@@ -5336,10 +5336,10 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
AAMDNodes AAInfo = LD->getAAInfo();
- if (LdVT.isScalableVector())
- report_fatal_error("Generating widen scalable extending vector loads is "
- "not yet supported");
-
+ if (LdVT.isScalableVector())
+ report_fatal_error("Generating widen scalable extending vector loads is "
+ "not yet supported");
+
EVT EltVT = WidenVT.getVectorElementType();
EVT LdEltVT = LdVT.getVectorElementType();
unsigned NumElts = LdVT.getVectorNumElements();
@@ -5354,8 +5354,8 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
LdChain.push_back(Ops[0].getValue(1));
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
- SDValue NewBasePtr =
- DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::Fixed(Offset));
+ SDValue NewBasePtr =
+ DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::Fixed(Offset));
Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
@@ -5383,62 +5383,62 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
SDLoc dl(ST);
EVT StVT = ST->getMemoryVT();
- TypeSize StWidth = StVT.getSizeInBits();
+ TypeSize StWidth = StVT.getSizeInBits();
EVT ValVT = ValOp.getValueType();
- TypeSize ValWidth = ValVT.getSizeInBits();
+ TypeSize ValWidth = ValVT.getSizeInBits();
EVT ValEltVT = ValVT.getVectorElementType();
- unsigned ValEltWidth = ValEltVT.getFixedSizeInBits();
+ unsigned ValEltWidth = ValEltVT.getFixedSizeInBits();
assert(StVT.getVectorElementType() == ValEltVT);
- assert(StVT.isScalableVector() == ValVT.isScalableVector() &&
- "Mismatch between store and value types");
+ assert(StVT.isScalableVector() == ValVT.isScalableVector() &&
+ "Mismatch between store and value types");
int Idx = 0; // current index to store
-
- MachinePointerInfo MPI = ST->getPointerInfo();
- uint64_t ScaledOffset = 0;
- while (StWidth.isNonZero()) {
+
+ MachinePointerInfo MPI = ST->getPointerInfo();
+ uint64_t ScaledOffset = 0;
+ while (StWidth.isNonZero()) {
// Find the largest vector type we can store with.
- EVT NewVT = FindMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
- TypeSize NewVTWidth = NewVT.getSizeInBits();
-
+ EVT NewVT = FindMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
+ TypeSize NewVTWidth = NewVT.getSizeInBits();
+
if (NewVT.isVector()) {
- unsigned NumVTElts = NewVT.getVectorMinNumElements();
+ unsigned NumVTElts = NewVT.getVectorMinNumElements();
do {
- Align NewAlign = ScaledOffset == 0
- ? ST->getOriginalAlign()
- : commonAlignment(ST->getAlign(), ScaledOffset);
+ Align NewAlign = ScaledOffset == 0
+ ? ST->getOriginalAlign()
+ : commonAlignment(ST->getAlign(), ScaledOffset);
SDValue EOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
DAG.getVectorIdxConstant(Idx, dl));
- SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
- MMOFlags, AAInfo);
- StChain.push_back(PartStore);
-
+ SDValue PartStore = DAG.getStore(Chain, dl, EOp, BasePtr, MPI, NewAlign,
+ MMOFlags, AAInfo);
+ StChain.push_back(PartStore);
+
StWidth -= NewVTWidth;
Idx += NumVTElts;
- IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
- &ScaledOffset);
- } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
+ IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr,
+ &ScaledOffset);
+ } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
} else {
// Cast the vector to the scalar type we can store.
- unsigned NumElts = ValWidth.getFixedSize() / NewVTWidth.getFixedSize();
+ unsigned NumElts = ValWidth.getFixedSize() / NewVTWidth.getFixedSize();
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
// Readjust index position based on new vector type.
- Idx = Idx * ValEltWidth / NewVTWidth.getFixedSize();
+ Idx = Idx * ValEltWidth / NewVTWidth.getFixedSize();
do {
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
DAG.getVectorIdxConstant(Idx++, dl));
- SDValue PartStore =
- DAG.getStore(Chain, dl, EOp, BasePtr, MPI, ST->getOriginalAlign(),
- MMOFlags, AAInfo);
- StChain.push_back(PartStore);
-
+ SDValue PartStore =
+ DAG.getStore(Chain, dl, EOp, BasePtr, MPI, ST->getOriginalAlign(),
+ MMOFlags, AAInfo);
+ StChain.push_back(PartStore);
+
StWidth -= NewVTWidth;
- IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
- } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
+ IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
+ } while (StWidth.isNonZero() && TypeSize::isKnownGE(StWidth, NewVTWidth));
// Restore index back to be relative to the original widen element type.
- Idx = Idx * NewVTWidth.getFixedSize() / ValEltWidth;
+ Idx = Idx * NewVTWidth.getFixedSize() / ValEltWidth;
}
}
}
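GenWidenVectorStores above repeatedly emits the largest store that still fits, advancing the pointer and shrinking the remaining width until nothing is left (with alignment recomputed from the scaled offset). The fixed-width shape of that loop, as a self-contained byte-level sketch:

#include <cstring>
#include <cstdint>
#include <cassert>

// Store the largest chunk that fits, then loop on the remainder.
void storeInChunks(uint8_t *Dst, const uint8_t *Src, unsigned Bytes) {
  unsigned Off = 0;
  while (Bytes != 0) {
    unsigned Chunk = 8;                  // widest "legal" store in this sketch
    while (Chunk > Bytes)
      Chunk /= 2;                        // 8 -> 4 -> 2 -> 1
    std::memcpy(Dst + Off, Src + Off, Chunk);
    Off += Chunk;
    Bytes -= Chunk;
  }
}

int main() {
  uint8_t In[11], Out[11] = {0};
  for (unsigned I = 0; I < 11; ++I) In[I] = uint8_t(I + 1);
  storeInChunks(Out, In, 11);            // emits 8 + 2 + 1 byte stores
  assert(std::memcmp(In, Out, 11) == 0);
}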
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 0022e5ec31..e1d2c3c771 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -760,7 +760,7 @@ void ScheduleDAGLinearize::Schedule() {
MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
- InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
+ InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
DenseMap<SDValue, Register> VRBaseMap;
LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 7a5e8ac607..163e31165e 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1838,16 +1838,16 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit *> &Q, SF &Picker) {
- unsigned BestIdx = 0;
- // Only compute the cost for the first 1000 items in the queue, to avoid
- // excessive compile-times for very large queues.
- for (unsigned I = 1, E = std::min(Q.size(), (decltype(Q.size()))1000); I != E;
- I++)
- if (Picker(Q[BestIdx], Q[I]))
- BestIdx = I;
- SUnit *V = Q[BestIdx];
- if (BestIdx + 1 != Q.size())
- std::swap(Q[BestIdx], Q.back());
+ unsigned BestIdx = 0;
+ // Only compute the cost for the first 1000 items in the queue, to avoid
+ // excessive compile-times for very large queues.
+ for (unsigned I = 1, E = std::min(Q.size(), (decltype(Q.size()))1000); I != E;
+ I++)
+ if (Picker(Q[BestIdx], Q[I]))
+ BestIdx = I;
+ SUnit *V = Q[BestIdx];
+ if (BestIdx + 1 != Q.size())
+ std::swap(Q[BestIdx], Q.back());
Q.pop_back();
return V;
}
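The popFromQueueImpl change above caps the picker scan at the first 1000 queue entries to bound compile time on huge queues, then removes the winner with a swap-and-pop so erasure stays O(1). The same logic, reduced to a standalone sketch with a plain int comparison in place of the SF picker:

#include <vector>
#include <algorithm>
#include <cassert>

// Scan at most the first 1000 candidates for the best one, then
// swap-and-pop so removal is O(1) regardless of where the winner sat.
int popBest(std::vector<int> &Q) {
  size_t BestIdx = 0;
  size_t E = std::min(Q.size(), size_t(1000)); // bound the comparison work
  for (size_t I = 1; I < E; ++I)
    if (Q[I] > Q[BestIdx])
      BestIdx = I;
  int V = Q[BestIdx];
  if (BestIdx + 1 != Q.size())
    std::swap(Q[BestIdx], Q.back());           // move winner to the end
  Q.pop_back();
  return V;
}

int main() {
  std::vector<int> Q = {3, 9, 1, 4};
  assert(popBest(Q) == 9);
  assert(Q.size() == 3);
}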
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index debfdda90e..de0a4af09a 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -125,7 +125,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
PhysReg = Reg;
} else if (Def->isMachineOpcode()) {
const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
- if (ResNo >= II.getNumDefs() && II.hasImplicitDefOfPhysReg(Reg))
+ if (ResNo >= II.getNumDefs() && II.hasImplicitDefOfPhysReg(Reg))
PhysReg = Reg;
}
@@ -172,7 +172,7 @@ static bool AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
// Don't add glue to something that already has a glue value.
if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;
- SmallVector<EVT, 4> VTs(N->values());
+ SmallVector<EVT, 4> VTs(N->values());
if (AddGlue)
VTs.push_back(MVT::Glue);
@@ -829,7 +829,7 @@ EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, Register> &VRBaseMap,
/// not necessarily refer to the returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
- InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
+ InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
DenseMap<SDValue, Register> VRBaseMap;
DenseMap<SUnit*, Register> CopyVRBaseMap;
SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
@@ -1033,29 +1033,29 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
}
InsertPos = Emitter.getInsertPos();
- // In some cases, DBG_VALUEs might be inserted after the first terminator,
- // which results in an invalid MBB. If that happens, move the DBG_VALUEs
- // before the first terminator.
- MachineBasicBlock *InsertBB = Emitter.getBlock();
- auto FirstTerm = InsertBB->getFirstTerminator();
- if (FirstTerm != InsertBB->end()) {
- assert(!FirstTerm->isDebugValue() &&
- "first terminator cannot be a debug value");
- for (MachineInstr &MI : make_early_inc_range(
- make_range(std::next(FirstTerm), InsertBB->end()))) {
- if (!MI.isDebugValue())
- continue;
-
- if (&MI == InsertPos)
- InsertPos = std::prev(InsertPos->getIterator());
-
- // The DBG_VALUE was referencing a value produced by a terminator. By
- // moving the DBG_VALUE, the referenced value also needs invalidating.
- MI.getOperand(0).ChangeToRegister(0, false);
- MI.moveBefore(&*FirstTerm);
- }
- }
- return InsertBB;
+ // In some cases, DBG_VALUEs might be inserted after the first terminator,
+ // which results in an invalid MBB. If that happens, move the DBG_VALUEs
+ // before the first terminator.
+ MachineBasicBlock *InsertBB = Emitter.getBlock();
+ auto FirstTerm = InsertBB->getFirstTerminator();
+ if (FirstTerm != InsertBB->end()) {
+ assert(!FirstTerm->isDebugValue() &&
+ "first terminator cannot be a debug value");
+ for (MachineInstr &MI : make_early_inc_range(
+ make_range(std::next(FirstTerm), InsertBB->end()))) {
+ if (!MI.isDebugValue())
+ continue;
+
+ if (&MI == InsertPos)
+ InsertPos = std::prev(InsertPos->getIterator());
+
+ // The DBG_VALUE was referencing a value produced by a terminator. By
+ // moving the DBG_VALUE, the referenced value also needs invalidating.
+ MI.getOperand(0).ChangeToRegister(0, false);
+ MI.moveBefore(&*FirstTerm);
+ }
+ }
+ return InsertBB;
}
/// Return the basic block label.
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 2090762e2f..b0f50ffafd 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -28,7 +28,7 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -139,15 +139,15 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
//===----------------------------------------------------------------------===//
bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
- if (N->getOpcode() == ISD::SPLAT_VECTOR) {
- unsigned EltSize =
- N->getValueType(0).getVectorElementType().getSizeInBits();
- if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
- return true;
- }
- }
-
+ if (N->getOpcode() == ISD::SPLAT_VECTOR) {
+ unsigned EltSize =
+ N->getValueType(0).getVectorElementType().getSizeInBits();
+ if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
+ return true;
+ }
+ }
+
auto *BV = dyn_cast<BuildVectorSDNode>(N);
if (!BV)
return false;
@@ -164,16 +164,16 @@ bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?
-bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
+bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
// Look through a bit convert.
while (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
- if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
- APInt SplatVal;
- return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
- }
-
+ if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
+ APInt SplatVal;
+ return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
+ }
+
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
unsigned i = 0, e = N->getNumOperands();
@@ -213,16 +213,16 @@ bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
return true;
}
-bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
+bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
// Look through a bit convert.
while (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode();
- if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
- APInt SplatVal;
- return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
- }
-
+ if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
+ APInt SplatVal;
+ return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
+ }
+
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
bool IsAllUndef = true;
@@ -255,14 +255,14 @@ bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
return true;
}
-bool ISD::isBuildVectorAllOnes(const SDNode *N) {
- return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
-}
-
-bool ISD::isBuildVectorAllZeros(const SDNode *N) {
- return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
-}
-
+bool ISD::isBuildVectorAllOnes(const SDNode *N) {
+ return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
+}
+
+bool ISD::isBuildVectorAllZeros(const SDNode *N) {
+ return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
+}
+
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
if (N->getOpcode() != ISD::BUILD_VECTOR)
return false;
@@ -306,8 +306,8 @@ bool ISD::matchUnaryPredicate(SDValue Op,
return Match(Cst);
// FIXME: Add support for vector UNDEF cases?
- if (ISD::BUILD_VECTOR != Op.getOpcode() &&
- ISD::SPLAT_VECTOR != Op.getOpcode())
+ if (ISD::BUILD_VECTOR != Op.getOpcode() &&
+ ISD::SPLAT_VECTOR != Op.getOpcode())
return false;
EVT SVT = Op.getValueType().getScalarType();
@@ -361,76 +361,76 @@ bool ISD::matchBinaryPredicate(
return true;
}
-ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
- switch (VecReduceOpcode) {
- default:
- llvm_unreachable("Expected VECREDUCE opcode");
- case ISD::VECREDUCE_FADD:
- case ISD::VECREDUCE_SEQ_FADD:
- return ISD::FADD;
- case ISD::VECREDUCE_FMUL:
- case ISD::VECREDUCE_SEQ_FMUL:
- return ISD::FMUL;
- case ISD::VECREDUCE_ADD:
- return ISD::ADD;
- case ISD::VECREDUCE_MUL:
- return ISD::MUL;
- case ISD::VECREDUCE_AND:
- return ISD::AND;
- case ISD::VECREDUCE_OR:
- return ISD::OR;
- case ISD::VECREDUCE_XOR:
- return ISD::XOR;
- case ISD::VECREDUCE_SMAX:
- return ISD::SMAX;
- case ISD::VECREDUCE_SMIN:
- return ISD::SMIN;
- case ISD::VECREDUCE_UMAX:
- return ISD::UMAX;
- case ISD::VECREDUCE_UMIN:
- return ISD::UMIN;
- case ISD::VECREDUCE_FMAX:
- return ISD::FMAXNUM;
- case ISD::VECREDUCE_FMIN:
- return ISD::FMINNUM;
- }
-}
-
-bool ISD::isVPOpcode(unsigned Opcode) {
- switch (Opcode) {
- default:
- return false;
-#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
- case ISD::SDOPC: \
- return true;
-#include "llvm/IR/VPIntrinsics.def"
- }
-}
-
-/// The operand position of the vector mask.
-Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
- switch (Opcode) {
- default:
- return None;
-#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \
- case ISD::SDOPC: \
- return MASKPOS;
-#include "llvm/IR/VPIntrinsics.def"
- }
-}
-
-/// The operand position of the explicit vector length parameter.
-Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
- switch (Opcode) {
- default:
- return None;
-#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
- case ISD::SDOPC: \
- return EVLPOS;
-#include "llvm/IR/VPIntrinsics.def"
- }
-}
-
+ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
+ switch (VecReduceOpcode) {
+ default:
+ llvm_unreachable("Expected VECREDUCE opcode");
+ case ISD::VECREDUCE_FADD:
+ case ISD::VECREDUCE_SEQ_FADD:
+ return ISD::FADD;
+ case ISD::VECREDUCE_FMUL:
+ case ISD::VECREDUCE_SEQ_FMUL:
+ return ISD::FMUL;
+ case ISD::VECREDUCE_ADD:
+ return ISD::ADD;
+ case ISD::VECREDUCE_MUL:
+ return ISD::MUL;
+ case ISD::VECREDUCE_AND:
+ return ISD::AND;
+ case ISD::VECREDUCE_OR:
+ return ISD::OR;
+ case ISD::VECREDUCE_XOR:
+ return ISD::XOR;
+ case ISD::VECREDUCE_SMAX:
+ return ISD::SMAX;
+ case ISD::VECREDUCE_SMIN:
+ return ISD::SMIN;
+ case ISD::VECREDUCE_UMAX:
+ return ISD::UMAX;
+ case ISD::VECREDUCE_UMIN:
+ return ISD::UMIN;
+ case ISD::VECREDUCE_FMAX:
+ return ISD::FMAXNUM;
+ case ISD::VECREDUCE_FMIN:
+ return ISD::FMINNUM;
+ }
+}
+
+bool ISD::isVPOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return false;
+#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
+ case ISD::SDOPC: \
+ return true;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+}
+
+/// The operand position of the vector mask.
+Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return None;
+#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \
+ case ISD::SDOPC: \
+ return MASKPOS;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+}
+
+/// The operand position of the explicit vector length parameter.
+Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return None;
+#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
+ case ISD::SDOPC: \
+ return EVLPOS;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+}
+
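The three ISD VP helpers above are generated from llvm/IR/VPIntrinsics.def with the X-macro pattern: one entry list is expanded under different macro definitions, so the opcode list and every predicate built from it can never drift apart. A miniature, self-contained imitation (OPCODES_DEF stands in for the real .def file):

#include <cassert>

#define OPCODES_DEF(X) X(VP_ADD) X(VP_SUB) X(VP_MUL)

enum Opcode {
#define DECLARE(Name) Name,
  OPCODES_DEF(DECLARE)
#undef DECLARE
  OTHER
};

bool isVPOpcode(Opcode Op) {
  switch (Op) {
#define CASE(Name) case Name: return true;
  OPCODES_DEF(CASE)
#undef CASE
  default:
    return false;
  }
}

int main() {
  assert(isVPOpcode(VP_MUL));
  assert(!isVPOpcode(OTHER));
}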
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
switch (ExtType) {
case ISD::EXTLOAD:
@@ -635,11 +635,11 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
}
break;
- case ISD::PSEUDO_PROBE:
- ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
- ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
- ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
- break;
+ case ISD::PSEUDO_PROBE:
+ ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
+ ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
+ ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
+ break;
case ISD::JumpTable:
case ISD::TargetJumpTable:
ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
@@ -1333,7 +1333,7 @@ SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
bool isT, bool isO) {
EVT EltVT = VT.getScalarType();
assert((EltVT.getSizeInBits() >= 64 ||
- (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
+ (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
"getConstant with a uint64_t value that doesn't fit in the type!");
return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}
@@ -1355,10 +1355,10 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
// inserted value (the type does not need to match the vector element type).
// Any extra bits introduced will be truncated away.
if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
- TargetLowering::TypePromoteInteger) {
- EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
- APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
- Elt = ConstantInt::get(*getContext(), NewVal);
+ TargetLowering::TypePromoteInteger) {
+ EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
+ APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
+ Elt = ConstantInt::get(*getContext(), NewVal);
}
// In other cases the element type is illegal and needs to be expanded, for
// example v2i64 on MIPS32. In this case, find the nearest legal type, split
@@ -1368,7 +1368,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
// only legalize if the DAG tells us we must produce legal types.
else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
TLI->getTypeAction(*getContext(), EltVT) ==
- TargetLowering::TypeExpandInteger) {
+ TargetLowering::TypeExpandInteger) {
const APInt &NewVal = Elt->getValue();
EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
@@ -1382,9 +1382,9 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
SmallVector<SDValue, 2> EltParts;
for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
- EltParts.push_back(getConstant(
- NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
- ViaEltVT, isT, isO));
+ EltParts.push_back(getConstant(
+ NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
+ ViaEltVT, isT, isO));
}
// EltParts is currently in little endian order. If we actually want
@@ -1401,10 +1401,10 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
- llvm::append_range(Ops, EltParts);
+ llvm::append_range(Ops, EltParts);
- SDValue V =
- getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
+ SDValue V =
+ getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
return V;
}
@@ -1485,9 +1485,9 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
}
SDValue Result(N, 0);
- if (VT.isScalableVector())
- Result = getSplatVector(VT, DL, Result);
- else if (VT.isVector())
+ if (VT.isScalableVector())
+ Result = getSplatVector(VT, DL, Result);
+ else if (VT.isVector())
Result = getSplatBuildVector(VT, DL, Result);
NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
return Result;
@@ -2130,14 +2130,14 @@ Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
MachineFrameInfo &MFI = MF->getFrameInfo();
- const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
- int StackID = 0;
- if (Bytes.isScalable())
- StackID = TFI->getStackIDForScalableVectors();
- // The stack id gives an indication of whether the object is scalable or
- // not, so it's safe to pass in the minimum size here.
- int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
- false, nullptr, StackID);
+ const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
+ int StackID = 0;
+ if (Bytes.isScalable())
+ StackID = TFI->getStackIDForScalableVectors();
+ // The stack id gives an indication of whether the object is scalable or
+ // not, so it's safe to pass in the minimum size here.
+ int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
+ false, nullptr, StackID);
return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
}
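CreateStackTemporary works in TypeSize units so it can handle scalable vectors, whose byte size is a known minimum multiplied by an unknown hardware factor. A toy model of the comparison rule it relies on, with ToySize as an invented stand-in for llvm::TypeSize:

#include <cassert>

// A size is KnownMin bytes, scaled by an unknown hardware factor when
// Scalable is true. Sizes with different scalability are not comparable,
// which is exactly what the assertion in CreateStackTemporary enforces.
struct ToySize {
  unsigned KnownMin;
  bool Scalable;
};

ToySize maxSize(ToySize A, ToySize B) {
  assert(A.Scalable == B.Scalable &&
         "cannot pick a maximum across fixed and scalable sizes");
  return A.KnownMin > B.KnownMin ? A : B;
}

int main() {
  ToySize F16{16, false}, F32{32, false};
  assert(maxSize(F16, F32).KnownMin == 32);
}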
@@ -2149,14 +2149,14 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
}
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
- TypeSize VT1Size = VT1.getStoreSize();
- TypeSize VT2Size = VT2.getStoreSize();
- assert(VT1Size.isScalable() == VT2Size.isScalable() &&
- "Don't know how to choose the maximum size when creating a stack "
- "temporary");
- TypeSize Bytes =
- VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
-
+ TypeSize VT1Size = VT1.getStoreSize();
+ TypeSize VT2Size = VT2.getStoreSize();
+ assert(VT1Size.isScalable() == VT2Size.isScalable() &&
+ "Don't know how to choose the maximum size when creating a stack "
+ "temporary");
+ TypeSize Bytes =
+ VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
+
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
const DataLayout &DL = getDataLayout();
@@ -2325,10 +2325,10 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
EVT VT = V.getValueType();
-
- if (VT.isScalableVector())
- return SDValue();
-
+
+ if (VT.isScalableVector())
+ return SDValue();
+
APInt DemandedElts = VT.isVector()
? APInt::getAllOnesValue(VT.getVectorNumElements())
: APInt(1, 1);
@@ -2410,23 +2410,23 @@ bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
/// sense to specify which elements are demanded or undefined, therefore
/// they are simply ignored.
bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
- APInt &UndefElts, unsigned Depth) {
+ APInt &UndefElts, unsigned Depth) {
EVT VT = V.getValueType();
assert(VT.isVector() && "Vector type expected");
if (!VT.isScalableVector() && !DemandedElts)
return false; // No demanded elts, better to assume we don't know anything.
- if (Depth >= MaxRecursionDepth)
- return false; // Limit search depth.
-
+ if (Depth >= MaxRecursionDepth)
+ return false; // Limit search depth.
+
// Deal with some common cases here that work for both fixed and scalable
// vector types.
switch (V.getOpcode()) {
case ISD::SPLAT_VECTOR:
- UndefElts = V.getOperand(0).isUndef()
- ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
- : APInt(DemandedElts.getBitWidth(), 0);
+ UndefElts = V.getOperand(0).isUndef()
+ ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
+ : APInt(DemandedElts.getBitWidth(), 0);
return true;
case ISD::ADD:
case ISD::SUB:
@@ -2434,17 +2434,17 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
APInt UndefLHS, UndefRHS;
SDValue LHS = V.getOperand(0);
SDValue RHS = V.getOperand(1);
- if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
- isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
+ if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
+ isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
UndefElts = UndefLHS | UndefRHS;
return true;
}
break;
}
- case ISD::TRUNCATE:
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
+ case ISD::TRUNCATE:
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
}
// We don't support other cases than those above for scalable vectors at
@@ -2499,7 +2499,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
APInt UndefSrcElts;
APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
- if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
+ if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
return true;
}
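isSplatValue reports both whether the demanded lanes agree and which lanes are undef, since undef lanes may take any value without breaking the splat. A minimal fixed-width sketch of that contract (UNDEF_SENTINEL is a stand-in for SelectionDAG's undef tracking, not a real API):

#include <cassert>

const int UNDEF_SENTINEL = -999;

// The value is a splat if every defined lane agrees; undef lanes are
// recorded in UndefMask and otherwise ignored.
bool isSplat(const int *Lanes, unsigned N, unsigned &UndefMask) {
  UndefMask = 0;
  bool HaveVal = false;
  int Val = 0;
  for (unsigned I = 0; I < N; ++I) {
    if (Lanes[I] == UNDEF_SENTINEL) {
      UndefMask |= 1u << I;   // undef lanes may be anything
      continue;
    }
    if (!HaveVal) {
      Val = Lanes[I];
      HaveVal = true;
    } else if (Lanes[I] != Val) {
      return false;           // two defined lanes disagree
    }
  }
  return true;
}

int main() {
  int V[4] = {7, UNDEF_SENTINEL, 7, 7};
  unsigned Undefs;
  assert(isSplat(V, 4, Undefs) && Undefs == 0b0010u);
}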
@@ -2696,11 +2696,11 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
// We know all of the bits for a constant!
- return KnownBits::makeConstant(C->getAPIntValue());
+ return KnownBits::makeConstant(C->getAPIntValue());
}
if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
// We know all of the bits for a constant fp!
- return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
+ return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
}
if (Depth >= MaxRecursionDepth)
@@ -2735,7 +2735,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
// Known bits are the values that are shared by every demanded element.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
// If we don't know any bits, early out.
if (Known.isUnknown())
@@ -2772,7 +2772,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
if (!!DemandedLHS) {
SDValue LHS = Op.getOperand(0);
Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
// If we don't know any bits, early out.
if (Known.isUnknown())
@@ -2780,7 +2780,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
if (!!DemandedRHS) {
SDValue RHS = Op.getOperand(1);
Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
break;
}
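The repeated KnownBits::commonBits calls in these hunks all express the same rule: when a result may come from either of two sources (a shuffle lane, a select arm), only the bit facts shared by both sources survive. A miniature model of that intersection (the Known struct here is a simplified stand-in for llvm::KnownBits):

#include <cstdint>
#include <cassert>

// Zero has a bit set where the value is known 0, One where it is known 1;
// the two masks are disjoint by construction.
struct Known {
  uint32_t Zero, One;
};

// Keep only the facts that hold for both possible sources.
Known commonBits(Known A, Known B) {
  return {A.Zero & B.Zero, A.One & B.One};
}

int main() {
  Known A{0xFEu, 0x01u}; // low byte fully known: value 0x01
  Known B{0xFCu, 0x03u}; // low byte fully known: value 0x03
  Known C = commonBits(A, B);
  assert(C.One == 0x01u);  // bit 0 is 1 in both sources
  assert(C.Zero == 0xFCu); // bits 2-7 are 0 in both; bit 1 is now unknown
}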
@@ -2796,7 +2796,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
if (!!DemandedSub) {
SDValue Sub = Op.getOperand(i);
Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
// If we don't know any bits, early out.
if (Known.isUnknown())
@@ -2824,7 +2824,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
if (!!DemandedSrcElts) {
Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
break;
}
@@ -2943,13 +2943,13 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
case ISD::MUL: {
Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known = KnownBits::computeForMul(Known, Known2);
+ Known = KnownBits::computeForMul(Known, Known2);
break;
}
case ISD::UDIV: {
- Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::udiv(Known, Known2);
+ Known = KnownBits::udiv(Known, Known2);
break;
}
case ISD::SELECT:
@@ -2961,7 +2961,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
// Only known if known in both the LHS and RHS.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
break;
case ISD::SELECT_CC:
Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
@@ -2971,7 +2971,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
// Only known if known in both the LHS and RHS.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
break;
case ISD::SMULO:
case ISD::UMULO:
@@ -3000,8 +3000,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
case ISD::SHL:
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::shl(Known, Known2);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::shl(Known, Known2);
// Minimum shift low bits are known zero.
if (const APInt *ShMinAmt =
@@ -3010,8 +3010,8 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
break;
case ISD::SRL:
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::lshr(Known, Known2);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::lshr(Known, Known2);
// Minimum shift high bits are known zero.
if (const APInt *ShMinAmt =
@@ -3019,10 +3019,10 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setHighBits(ShMinAmt->getZExtValue());
break;
case ISD::SRA:
- Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::ashr(Known, Known2);
- // TODO: Add minimum shift high known sign bits.
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::ashr(Known, Known2);
+ // TODO: Add minimum shift high known sign bits.
break;
case ISD::FSHL:
case ISD::FSHR:
@@ -3057,9 +3057,9 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
break;
case ISD::SIGN_EXTEND_INREG: {
- Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- Known = Known.sextInReg(EVT.getScalarSizeInBits());
+ Known = Known.sextInReg(EVT.getScalarSizeInBits());
break;
}
case ISD::CTTZ:
@@ -3087,11 +3087,11 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
break;
}
- case ISD::PARITY: {
- // Parity returns 0 everywhere but the LSB.
- Known.Zero.setBitsFrom(1);
- break;
- }
+ case ISD::PARITY: {
+ // Parity returns 0 everywhere but the LSB.
+ Known.Zero.setBitsFrom(1);
+ break;
+ }
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op);
const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
@@ -3135,10 +3135,10 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
}
} else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
- Known = KnownBits::makeConstant(CInt->getValue());
+ Known = KnownBits::makeConstant(CInt->getValue());
} else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
- Known =
- KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
+ Known =
+ KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
}
}
}
@@ -3278,16 +3278,16 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
break;
}
- case ISD::SREM: {
- Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::srem(Known, Known2);
+ case ISD::SREM: {
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::srem(Known, Known2);
break;
- }
+ }
case ISD::UREM: {
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::urem(Known, Known2);
+ Known = KnownBits::urem(Known, Known2);
break;
}
case ISD::EXTRACT_ELEMENT: {
@@ -3307,9 +3307,9 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
SDValue InVec = Op.getOperand(0);
SDValue EltNo = Op.getOperand(1);
EVT VecVT = InVec.getValueType();
- // computeKnownBits not yet implemented for scalable vectors.
- if (VecVT.isScalableVector())
- break;
+ // computeKnownBits not yet implemented for scalable vectors.
+ if (VecVT.isScalableVector())
+ break;
const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
const unsigned NumSrcElts = VecVT.getVectorNumElements();
@@ -3350,39 +3350,39 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.Zero.setAllBits();
if (DemandedVal) {
Known2 = computeKnownBits(InVal, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
+ Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
}
if (!!DemandedVecElts) {
Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
break;
}
case ISD::BITREVERSE: {
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known = Known2.reverseBits();
+ Known = Known2.reverseBits();
break;
}
case ISD::BSWAP: {
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known = Known2.byteSwap();
+ Known = Known2.byteSwap();
break;
}
case ISD::ABS: {
Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
- Known = Known2.abs();
+ Known = Known2.abs();
break;
}
case ISD::UMIN: {
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::umin(Known, Known2);
+ Known = KnownBits::umin(Known, Known2);
break;
}
case ISD::UMAX: {
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = KnownBits::umax(Known, Known2);
+ Known = KnownBits::umax(Known, Known2);
break;
}
case ISD::SMIN:
@@ -3418,10 +3418,10 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- if (IsMax)
- Known = KnownBits::smax(Known, Known2);
- else
- Known = KnownBits::smin(Known, Known2);
+ if (IsMax)
+ Known = KnownBits::smax(Known, Known2);
+ else
+ Known = KnownBits::smin(Known, Known2);
break;
}
case ISD::FrameIndex:
@@ -4364,16 +4364,16 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
for (SDValue Op : Elts)
SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
- if (SVT.bitsGT(VT.getScalarType())) {
- for (SDValue &Op : Elts) {
- if (Op.isUndef())
- Op = DAG.getUNDEF(SVT);
- else
- Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
- ? DAG.getZExtOrTrunc(Op, DL, SVT)
- : DAG.getSExtOrTrunc(Op, DL, SVT);
- }
- }
+ if (SVT.bitsGT(VT.getScalarType())) {
+ for (SDValue &Op : Elts) {
+ if (Op.isUndef())
+ Op = DAG.getUNDEF(SVT);
+ else
+ Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
+ ? DAG.getZExtOrTrunc(Op, DL, SVT)
+ : DAG.getSExtOrTrunc(Op, DL, SVT);
+ }
+ }
SDValue V = DAG.getBuildVector(VT, DL, Elts);
NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
@@ -4399,14 +4399,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- SDValue Operand) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNode(Opcode, DL, VT, Operand, Flags);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+ SDValue Operand) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNode(Opcode, DL, VT, Operand, Flags);
+}
+
+SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
SDValue Operand, const SDNodeFlags Flags) {
// Constant fold unary operations with an integer constant operand. Even
// opaque constant will be folded, because the folding of unary operations
@@ -4607,8 +4607,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
if (Operand.getValueType() == VT) return Operand; // noop conversion.
assert((!VT.isVector() ||
- VT.getVectorElementCount() ==
- Operand.getValueType().getVectorElementCount()) &&
+ VT.getVectorElementCount() ==
+ Operand.getValueType().getVectorElementCount()) &&
"Vector element count mismatch!");
assert(Operand.getValueType().bitsLT(VT) &&
"Invalid fpext node, dst < src!");
@@ -4793,25 +4793,25 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
case ISD::VSCALE:
assert(VT == Operand.getValueType() && "Unexpected VT!");
break;
- case ISD::CTPOP:
- if (Operand.getValueType().getScalarType() == MVT::i1)
- return Operand;
- break;
- case ISD::CTLZ:
- case ISD::CTTZ:
- if (Operand.getValueType().getScalarType() == MVT::i1)
- return getNOT(DL, Operand, Operand.getValueType());
- break;
- case ISD::VECREDUCE_SMIN:
- case ISD::VECREDUCE_UMAX:
- if (Operand.getValueType().getScalarType() == MVT::i1)
- return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
- break;
- case ISD::VECREDUCE_SMAX:
- case ISD::VECREDUCE_UMIN:
- if (Operand.getValueType().getScalarType() == MVT::i1)
- return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
- break;
+ case ISD::CTPOP:
+ if (Operand.getValueType().getScalarType() == MVT::i1)
+ return Operand;
+ break;
+ case ISD::CTLZ:
+ case ISD::CTTZ:
+ if (Operand.getValueType().getScalarType() == MVT::i1)
+ return getNOT(DL, Operand, Operand.getValueType());
+ break;
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMAX:
+ if (Operand.getValueType().getScalarType() == MVT::i1)
+ return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
+ break;
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_UMIN:
+ if (Operand.getValueType().getScalarType() == MVT::i1)
+ return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
+ break;
}
SDNode *N;
@@ -5234,14 +5234,14 @@ SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- SDValue N1, SDValue N2) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNode(Opcode, DL, VT, N1, N2, Flags);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+ SDValue N1, SDValue N2) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNode(Opcode, DL, VT, N1, N2, Flags);
+}
+
+SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
SDValue N1, SDValue N2, const SDNodeFlags Flags) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
@@ -5329,22 +5329,22 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
break;
- case ISD::SMIN:
- case ISD::UMAX:
- assert(VT.isInteger() && "This operator does not apply to FP types!");
- assert(N1.getValueType() == N2.getValueType() &&
- N1.getValueType() == VT && "Binary operator types must match!");
- if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
- return getNode(ISD::OR, DL, VT, N1, N2);
- break;
- case ISD::SMAX:
- case ISD::UMIN:
- assert(VT.isInteger() && "This operator does not apply to FP types!");
- assert(N1.getValueType() == N2.getValueType() &&
- N1.getValueType() == VT && "Binary operator types must match!");
- if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
- return getNode(ISD::AND, DL, VT, N1, N2);
- break;
+ case ISD::SMIN:
+ case ISD::UMAX:
+ assert(VT.isInteger() && "This operator does not apply to FP types!");
+ assert(N1.getValueType() == N2.getValueType() &&
+ N1.getValueType() == VT && "Binary operator types must match!");
+ if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+ return getNode(ISD::OR, DL, VT, N1, N2);
+ break;
+ case ISD::SMAX:
+ case ISD::UMIN:
+ assert(VT.isInteger() && "This operator does not apply to FP types!");
+ assert(N1.getValueType() == N2.getValueType() &&
+ N1.getValueType() == VT && "Binary operator types must match!");
+ if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+ return getNode(ISD::AND, DL, VT, N1, N2);
+ break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
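The restored SMIN/UMAX and SMAX/UMIN cases fold i1 vector min/max into plain logic ops. A quick exhaustive check of those identities, remembering that an i1 read as signed is 0 or -1 (a hedged sketch, not DAG code):

#include <algorithm>
#include <cassert>

int main() {
  for (int A = 0; A <= 1; ++A)
    for (int B = 0; B <= 1; ++B) {
      // Unsigned i1 values are {0,1}.
      assert(std::max(A, B) == (A | B)); // umax == or
      assert(std::min(A, B) == (A & B)); // umin == and
      // Signed i1 values are {0,-1}; -A maps bit A to its signed value.
      assert(std::max(-A, -B) == -(A & B)); // smax == and
      assert(std::min(-A, -B) == -(A | B)); // smin == or
    }
  return 0;
}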
@@ -5386,8 +5386,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// amounts. This catches things like trying to shift an i1024 value by an
// i8, which is easy to fall into in generic code that uses
// TLI.getShiftAmount().
- assert(N2.getValueType().getScalarSizeInBits() >=
- Log2_32_Ceil(VT.getScalarSizeInBits()) &&
+ assert(N2.getValueType().getScalarSizeInBits() >=
+ Log2_32_Ceil(VT.getScalarSizeInBits()) &&
"Invalid use of small shift amount with oversized value!");
// Always fold shifts of i1 values so the code generator doesn't need to
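The widened assert above requires the shift-amount type to carry at least Log2_32_Ceil(bit width) bits. A small standalone check of that bound (log2Ceil is a local stand-in, not the LLVM helper):

#include <cassert>

// Smallest width that can encode every shift amount 0..N-1 for an
// N-bit value: ceil(log2(N)).
static unsigned log2Ceil(unsigned N) {
  unsigned Bits = 0;
  while ((1ULL << Bits) < N)
    ++Bits;
  return Bits;
}

int main() {
  assert(log2Ceil(1024) == 10); // an i8 amount (8 bits) would fail i1024
  assert(log2Ceil(64) == 6);    // i64 shifts fit in any type >= 6 bits
  return 0;
}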
@@ -5583,11 +5583,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
(VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
N1VT.getVectorMinNumElements()) &&
"Extract subvector overflow!");
- assert(N2C->getAPIntValue().getBitWidth() ==
- TLI->getVectorIdxTy(getDataLayout())
- .getSizeInBits()
- .getFixedSize() &&
- "Constant index for EXTRACT_SUBVECTOR has an invalid size");
+ assert(N2C->getAPIntValue().getBitWidth() ==
+ TLI->getVectorIdxTy(getDataLayout())
+ .getSizeInBits()
+ .getFixedSize() &&
+ "Constant index for EXTRACT_SUBVECTOR has an invalid size");
// Trivial extraction.
if (VT == N1VT)
@@ -5599,8 +5599,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
// the concat have the same type as the extract.
- if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
- VT == N1.getOperand(0).getValueType()) {
+ if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
+ VT == N1.getOperand(0).getValueType()) {
unsigned Factor = VT.getVectorMinNumElements();
return N1.getOperand(N2C->getZExtValue() / Factor);
}
@@ -5697,14 +5697,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- SDValue N1, SDValue N2, SDValue N3) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+ SDValue N1, SDValue N2, SDValue N3) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
+}
+
+SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
const SDNodeFlags Flags) {
// Perform various simplifications.
@@ -5974,20 +5974,20 @@ static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
return SDValue(nullptr, 0);
}
-SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
+SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
const SDLoc &DL,
const SDNodeFlags Flags) {
EVT VT = Base.getValueType();
- SDValue Index;
-
- if (Offset.isScalable())
- Index = getVScale(DL, Base.getValueType(),
- APInt(Base.getValueSizeInBits().getFixedSize(),
- Offset.getKnownMinSize()));
- else
- Index = getConstant(Offset.getFixedSize(), DL, VT);
-
- return getMemBasePlusOffset(Base, Index, DL, Flags);
+ SDValue Index;
+
+ if (Offset.isScalable())
+ Index = getVScale(DL, Base.getValueType(),
+ APInt(Base.getValueSizeInBits().getFixedSize(),
+ Offset.getKnownMinSize()));
+ else
+ Index = getConstant(Offset.getFixedSize(), DL, VT);
+
+ return getMemBasePlusOffset(Base, Index, DL, Flags);
}
SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
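The TypeSize overload restored above materializes a scalable offset as vscale * known-min-size instead of a plain constant, so the byte count is only fixed at run time. A sketch of the resulting address arithmetic under an assumed vscale of 4 (illustrative values, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Base = 0x1000;
  uint64_t KnownMinSize = 16; // Offset.getKnownMinSize()
  uint64_t VScale = 4;        // runtime value the ISD::VSCALE node stands for
  uint64_t Index = VScale * KnownMinSize; // scalable branch above
  assert(Base + Index == 0x1040); // the fixed branch would just add 16
  return 0;
}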
@@ -6082,8 +6082,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
SrcAlign = Alignment;
assert(SrcAlign && "SrcAlign must be set");
ConstantDataArraySlice Slice;
- // If marked as volatile, perform a copy even when marked as constant.
- bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
+ // If marked as volatile, perform a copy even when marked as constant.
+ bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
const MemOp Op = isZeroConstant
@@ -6155,9 +6155,9 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
if (Value.getNode()) {
Store = DAG.getStore(
- Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
- DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
+ Chain, dl, Value,
+ DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
OutChains.push_back(Store);
}
}
@@ -6177,17 +6177,17 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
if (isDereferenceable)
SrcMMOFlags |= MachineMemOperand::MODereferenceable;
- Value = DAG.getExtLoad(
- ISD::EXTLOAD, dl, NVT, Chain,
- DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
- SrcPtrInfo.getWithOffset(SrcOff), VT,
- commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
+ Value = DAG.getExtLoad(
+ ISD::EXTLOAD, dl, NVT, Chain,
+ DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
+ SrcPtrInfo.getWithOffset(SrcOff), VT,
+ commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
OutLoadChains.push_back(Value.getValue(1));
Store = DAG.getTruncStore(
- Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
- DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
+ Chain, dl, Value,
+ DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
OutStoreChains.push_back(Store);
}
SrcOff += VTSize;
@@ -6307,10 +6307,10 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
if (isDereferenceable)
SrcMMOFlags |= MachineMemOperand::MODereferenceable;
- Value =
- DAG.getLoad(VT, dl, Chain,
- DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
- SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
+ Value =
+ DAG.getLoad(VT, dl, Chain,
+ DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
+ SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -6322,10 +6322,10 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Store;
- Store =
- DAG.getStore(Chain, dl, LoadValues[i],
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
- DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
+ Store =
+ DAG.getStore(Chain, dl, LoadValues[i],
+ DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -6423,9 +6423,9 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
}
assert(Value.getValueType() == VT && "Value with wrong type.");
SDValue Store = DAG.getStore(
- Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
- DstPtrInfo.getWithOffset(DstOff), Alignment,
+ Chain, dl, Value,
+ DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DstPtrInfo.getWithOffset(DstOff), Alignment,
isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
OutChains.push_back(Store);
DstOff += VT.getSizeInBits() / 8;
@@ -6439,7 +6439,7 @@ static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
unsigned AS) {
// Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
// pointer operands can be losslessly bitcasted to pointers of address space 0
- if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
+ if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
report_fatal_error("cannot lower memory intrinsic in address space " +
Twine(AS));
}
@@ -6931,30 +6931,30 @@ SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
return V;
}
-SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
- uint64_t Guid, uint64_t Index,
- uint32_t Attr) {
- const unsigned Opcode = ISD::PSEUDO_PROBE;
- const auto VTs = getVTList(MVT::Other);
- SDValue Ops[] = {Chain};
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTs, Ops);
- ID.AddInteger(Guid);
- ID.AddInteger(Index);
- void *IP = nullptr;
- if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
- return SDValue(E, 0);
-
- auto *N = newSDNode<PseudoProbeSDNode>(
- Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
- createOperands(N, Ops);
- CSEMap.InsertNode(N, IP);
- InsertNode(N);
- SDValue V(N, 0);
- NewSDValueDbgMsg(V, "Creating new node: ", this);
- return V;
-}
-
+SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
+ uint64_t Guid, uint64_t Index,
+ uint32_t Attr) {
+ const unsigned Opcode = ISD::PSEUDO_PROBE;
+ const auto VTs = getVTList(MVT::Other);
+ SDValue Ops[] = {Chain};
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, Opcode, VTs, Ops);
+ ID.AddInteger(Guid);
+ ID.AddInteger(Index);
+ void *IP = nullptr;
+ if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
+ return SDValue(E, 0);
+
+ auto *N = newSDNode<PseudoProbeSDNode>(
+ Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
+ createOperands(N, Ops);
+ CSEMap.InsertNode(N, IP);
+ InsertNode(N);
+ SDValue V(N, 0);
+ NewSDValueDbgMsg(V, "Creating new node: ", this);
+ return V;
+}
+
/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
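The restored getPseudoProbeNode follows the standard SelectionDAG CSE shape: hash the opcode, VT list, operands, and the probe's Guid/Index into a FoldingSetNodeID, return an existing node on a hit, otherwise create and register one. A minimal stand-in for that pattern with a std::map as the CSE map (Node and getProbe are hypothetical names, not LLVM types):

#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>

struct Node { uint64_t Guid, Index; };

// Key mirrors what AddNodeIDNode + AddInteger(Guid/Index) hash.
static std::map<std::tuple<unsigned, uint64_t, uint64_t>, Node *> CSEMap;

static Node *getProbe(unsigned Opcode, uint64_t Guid, uint64_t Index) {
  auto Key = std::make_tuple(Opcode, Guid, Index);
  auto It = CSEMap.find(Key);
  if (It != CSEMap.end())
    return It->second;             // FindNodeOrInsertPos hit
  Node *N = new Node{Guid, Index}; // newSDNode (leaked for brevity)
  CSEMap.emplace(Key, N);          // CSEMap.InsertNode
  return N;
}

int main() {
  assert(getProbe(1, 42, 0) == getProbe(1, 42, 0)); // deduplicated
  assert(getProbe(1, 42, 0) != getProbe(1, 43, 0)); // different Guid
  return 0;
}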
@@ -7035,7 +7035,7 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
assert(VT.isVector() == MemVT.isVector() &&
"Cannot use an ext load to convert to or from a vector!");
assert((!VT.isVector() ||
- VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
+ VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
"Cannot use an ext load to change the number of vector elements!");
}
@@ -7114,7 +7114,7 @@ SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getPointerInfo(),
- LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
+ LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
}
SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
@@ -7184,8 +7184,8 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
- Alignment, AAInfo);
+ PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
+ Alignment, AAInfo);
return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}
@@ -7206,7 +7206,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
assert(VT.isVector() == SVT.isVector() &&
"Cannot use trunc store to convert to or from a vector!");
assert((!VT.isVector() ||
- VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
+ VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
"Cannot use trunc store to change the number of vector elements!");
SDVTList VTs = getVTList(MVT::Other);
@@ -7358,15 +7358,15 @@ SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
- ISD::MemIndexType IndexType,
- ISD::LoadExtType ExtTy) {
+ ISD::MemIndexType IndexType,
+ ISD::LoadExtType ExtTy) {
assert(Ops.size() == 6 && "Incompatible number of operands");
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
- dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
+ dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
@@ -7374,22 +7374,22 @@ SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
return SDValue(E, 0);
}
- IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
+ IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
- VTs, VT, MMO, IndexType, ExtTy);
+ VTs, VT, MMO, IndexType, ExtTy);
createOperands(N, Ops);
assert(N->getPassThru().getValueType() == N->getValueType(0) &&
"Incompatible type of the PassThru value in MaskedGatherSDNode");
- assert(N->getMask().getValueType().getVectorElementCount() ==
- N->getValueType(0).getVectorElementCount() &&
+ assert(N->getMask().getValueType().getVectorElementCount() ==
+ N->getValueType(0).getVectorElementCount() &&
"Vector width mismatch between mask and data");
- assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
- N->getValueType(0).getVectorElementCount().isScalable() &&
- "Scalable flags of index and data do not match");
- assert(ElementCount::isKnownGE(
- N->getIndex().getValueType().getVectorElementCount(),
- N->getValueType(0).getVectorElementCount()) &&
+ assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
+ N->getValueType(0).getVectorElementCount().isScalable() &&
+ "Scalable flags of index and data do not match");
+ assert(ElementCount::isKnownGE(
+ N->getIndex().getValueType().getVectorElementCount(),
+ N->getValueType(0).getVectorElementCount()) &&
"Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
@@ -7405,37 +7405,37 @@ SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
- ISD::MemIndexType IndexType,
- bool IsTrunc) {
+ ISD::MemIndexType IndexType,
+ bool IsTrunc) {
assert(Ops.size() == 6 && "Incompatible number of operands");
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
- dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
+ dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
-
- IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
+
+ IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
- VTs, VT, MMO, IndexType, IsTrunc);
+ VTs, VT, MMO, IndexType, IsTrunc);
createOperands(N, Ops);
- assert(N->getMask().getValueType().getVectorElementCount() ==
- N->getValue().getValueType().getVectorElementCount() &&
+ assert(N->getMask().getValueType().getVectorElementCount() ==
+ N->getValue().getValueType().getVectorElementCount() &&
"Vector width mismatch between mask and data");
- assert(
- N->getIndex().getValueType().getVectorElementCount().isScalable() ==
- N->getValue().getValueType().getVectorElementCount().isScalable() &&
- "Scalable flags of index and data do not match");
- assert(ElementCount::isKnownGE(
- N->getIndex().getValueType().getVectorElementCount(),
- N->getValue().getValueType().getVectorElementCount()) &&
+ assert(
+ N->getIndex().getValueType().getVectorElementCount().isScalable() ==
+ N->getValue().getValueType().getVectorElementCount().isScalable() &&
+ "Scalable flags of index and data do not match");
+ assert(ElementCount::isKnownGE(
+ N->getIndex().getValueType().getVectorElementCount(),
+ N->getValue().getValueType().getVectorElementCount()) &&
"Vector width mismatch between index and data");
assert(isa<ConstantSDNode>(N->getScale()) &&
cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
@@ -7539,11 +7539,11 @@ SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
if (YC->getValueAPF().isExactlyValue(1.0))
return X;
- // X * 0.0 --> 0.0
- if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
- if (YC->getValueAPF().isZero())
- return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
-
+ // X * 0.0 --> 0.0
+ if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
+ if (YC->getValueAPF().isZero())
+ return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
+
return SDValue();
}
@@ -7570,14 +7570,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- ArrayRef<SDValue> Ops) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNode(Opcode, DL, VT, Ops, Flags);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+ ArrayRef<SDValue> Ops) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNode(Opcode, DL, VT, Ops, Flags);
+}
+
+SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
unsigned NumOps = Ops.size();
switch (NumOps) {
@@ -7649,14 +7649,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
}
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
- ArrayRef<SDValue> Ops) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNode(Opcode, DL, VTList, Ops, Flags);
-}
-
-SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
+ ArrayRef<SDValue> Ops) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNode(Opcode, DL, VTList, Ops, Flags);
+}
+
+SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
if (VTList.NumVTs == 1)
return getNode(Opcode, DL, VTList.VTs[0], Ops);
@@ -8353,14 +8353,14 @@ SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops) {
- SDNodeFlags Flags;
- if (Inserter)
- Flags = Inserter->getFlags();
- return getNodeIfExists(Opcode, VTList, Ops, Flags);
-}
-
-SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
+ ArrayRef<SDValue> Ops) {
+ SDNodeFlags Flags;
+ if (Inserter)
+ Flags = Inserter->getFlags();
+ return getNodeIfExists(Opcode, VTList, Ops, Flags);
+}
+
+SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
ArrayRef<SDValue> Ops,
const SDNodeFlags Flags) {
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
@@ -8375,19 +8375,19 @@ SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
return nullptr;
}
-/// doesNodeExist - Check if a node exists without modifying its flags.
-bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops) {
- if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, Opcode, VTList, Ops);
- void *IP = nullptr;
- if (FindNodeOrInsertPos(ID, SDLoc(), IP))
- return true;
- }
- return false;
-}
-
+/// doesNodeExist - Check if a node exists without modifying its flags.
+bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
+ ArrayRef<SDValue> Ops) {
+ if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, Opcode, VTList, Ops);
+ void *IP = nullptr;
+ if (FindNodeOrInsertPos(ID, SDLoc(), IP))
+ return true;
+ }
+ return false;
+}
+
/// getDbgValue - Creates a SDDbgValue node.
///
/// SDNode
@@ -8805,31 +8805,31 @@ namespace {
} // end anonymous namespace
-bool SelectionDAG::calculateDivergence(SDNode *N) {
- if (TLI->isSDNodeAlwaysUniform(N)) {
- assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
- "Conflicting divergence information!");
- return false;
- }
- if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
- return true;
+bool SelectionDAG::calculateDivergence(SDNode *N) {
+ if (TLI->isSDNodeAlwaysUniform(N)) {
+ assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
+ "Conflicting divergence information!");
+ return false;
+ }
+ if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
+ return true;
for (auto &Op : N->ops()) {
- if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
- return true;
+ if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
+ return true;
}
- return false;
-}
-
-void SelectionDAG::updateDivergence(SDNode *N) {
- SmallVector<SDNode *, 16> Worklist(1, N);
- do {
- N = Worklist.pop_back_val();
- bool IsDivergent = calculateDivergence(N);
- if (N->SDNodeBits.IsDivergent != IsDivergent) {
- N->SDNodeBits.IsDivergent = IsDivergent;
- llvm::append_range(Worklist, N->uses());
+ return false;
+}
+
+void SelectionDAG::updateDivergence(SDNode *N) {
+ SmallVector<SDNode *, 16> Worklist(1, N);
+ do {
+ N = Worklist.pop_back_val();
+ bool IsDivergent = calculateDivergence(N);
+ if (N->SDNodeBits.IsDivergent != IsDivergent) {
+ N->SDNodeBits.IsDivergent = IsDivergent;
+ llvm::append_range(Worklist, N->uses());
}
- } while (!Worklist.empty());
+ } while (!Worklist.empty());
}
void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
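calculateDivergence and updateDivergence, restored above, form a classic dataflow pair: recompute one node's bit, and if it changed, push every user so the change ripples to a fixed point. A toy standalone model of that propagation (Node and update are local names, not SDNode):

#include <cassert>
#include <vector>

struct Node {
  bool Divergent = false;
  std::vector<Node *> Ops, Users;
};

static bool calcDivergence(const Node *N) {
  for (const Node *Op : N->Ops) // mirrors the operand scan above
    if (Op->Divergent)
      return true;
  return false;
}

static void update(Node *N) {
  std::vector<Node *> Worklist{N};
  while (!Worklist.empty()) {
    Node *Cur = Worklist.back();
    Worklist.pop_back();
    bool D = calcDivergence(Cur);
    if (Cur->Divergent != D) { // bit changed: ripple to users
      Cur->Divergent = D;
      for (Node *U : Cur->Users)
        Worklist.push_back(U);
    }
  }
}

int main() {
  Node A, B, C; // A feeds B, B feeds C
  B.Ops = {&A}; A.Users = {&B};
  C.Ops = {&B}; B.Users = {&C};
  A.Divergent = true; // pretend A became divergent
  update(&B);
  assert(B.Divergent && C.Divergent); // bit propagated through the DAG
  return 0;
}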
@@ -8855,9 +8855,9 @@ void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
void SelectionDAG::VerifyDAGDiverence() {
std::vector<SDNode *> TopoOrder;
CreateTopologicalOrder(TopoOrder);
- for (auto *N : TopoOrder) {
- assert(calculateDivergence(N) == N->isDivergent() &&
- "Divergence bit inconsistency detected");
+ for (auto *N : TopoOrder) {
+ assert(calculateDivergence(N) == N->isDivergent() &&
+ "Divergence bit inconsistency detected");
}
}
#endif
@@ -9026,32 +9026,32 @@ void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
DbgInfo->add(DB);
}
-SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
- SDValue NewMemOpChain) {
- assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
- assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
+SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
+ SDValue NewMemOpChain) {
+ assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
+ assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
// The new memory operation must have the same position as the old load in
// terms of memory dependency. Create a TokenFactor for the old load and new
// memory operation and update uses of the old load's output chain to use that
// TokenFactor.
- if (OldChain == NewMemOpChain || OldChain.use_empty())
- return NewMemOpChain;
+ if (OldChain == NewMemOpChain || OldChain.use_empty())
+ return NewMemOpChain;
- SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
- OldChain, NewMemOpChain);
+ SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
+ OldChain, NewMemOpChain);
ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
- UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
+ UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
return TokenFactor;
}
-SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
- SDValue NewMemOp) {
- assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
- SDValue OldChain = SDValue(OldLoad, 1);
- SDValue NewMemOpChain = NewMemOp.getValue(1);
- return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
-}
-
+SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
+ SDValue NewMemOp) {
+ assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
+ SDValue OldChain = SDValue(OldLoad, 1);
+ SDValue NewMemOpChain = NewMemOp.getValue(1);
+ return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
+}
+
SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
Function **OutFunction) {
assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
@@ -9135,18 +9135,18 @@ ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
return CN;
- // SplatVectors can truncate their operands. Ignore that case here unless
- // AllowTruncation is set.
- if (N->getOpcode() == ISD::SPLAT_VECTOR) {
- EVT VecEltVT = N->getValueType(0).getVectorElementType();
- if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- EVT CVT = CN->getValueType(0);
- assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
- if (AllowTruncation || CVT == VecEltVT)
- return CN;
- }
- }
-
+ // SplatVectors can truncate their operands. Ignore that case here unless
+ // AllowTruncation is set.
+ if (N->getOpcode() == ISD::SPLAT_VECTOR) {
+ EVT VecEltVT = N->getValueType(0).getVectorElementType();
+ if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ EVT CVT = CN->getValueType(0);
+ assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
+ if (AllowTruncation || CVT == VecEltVT)
+ return CN;
+ }
+ }
+
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
BitVector UndefElements;
ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
@@ -9200,10 +9200,10 @@ ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
return CN;
}
- if (N.getOpcode() == ISD::SPLAT_VECTOR)
- if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
- return CN;
-
+ if (N.getOpcode() == ISD::SPLAT_VECTOR)
+ if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
+ return CN;
+
return nullptr;
}
@@ -9365,7 +9365,7 @@ bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
bool Seen = false;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDNode *User = *I;
- if (llvm::is_contained(Nodes, User))
+ if (llvm::is_contained(Nodes, User))
Seen = true;
else
return false;
@@ -9376,7 +9376,7 @@ bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
/// isOperand - Return true if this node is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
- return is_contained(N->op_values(), *this);
+ return is_contained(N->op_values(), *this);
}
bool SDNode::isOperandOf(const SDNode *N) const {
@@ -9765,19 +9765,19 @@ SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
// custom VL=9 with enveloping VL=8/8 yields 8/1
// custom VL=10 with enveloping VL=8/8 yields 8/2
// etc.
- ElementCount VTNumElts = VT.getVectorElementCount();
- ElementCount EnvNumElts = EnvVT.getVectorElementCount();
- assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
- "Mixing fixed width and scalable vectors when enveloping a type");
+ ElementCount VTNumElts = VT.getVectorElementCount();
+ ElementCount EnvNumElts = EnvVT.getVectorElementCount();
+ assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
+ "Mixing fixed width and scalable vectors when enveloping a type");
EVT LoVT, HiVT;
- if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
+ if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
LoVT = EnvVT;
- HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
+ HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
*HiIsEmpty = false;
} else {
    // Flag that hi type has zero storage size, but return split envelope type
// (this would be easier if vector types with zero elements were allowed).
- LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
+ LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
HiVT = EnvVT;
*HiIsEmpty = true;
}
@@ -9912,16 +9912,16 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
BitVector *UndefElements) const {
- unsigned NumOps = getNumOperands();
+ unsigned NumOps = getNumOperands();
if (UndefElements) {
UndefElements->clear();
- UndefElements->resize(NumOps);
+ UndefElements->resize(NumOps);
}
- assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
+ assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
if (!DemandedElts)
return SDValue();
SDValue Splatted;
- for (unsigned i = 0; i != NumOps; ++i) {
+ for (unsigned i = 0; i != NumOps; ++i) {
if (!DemandedElts[i])
continue;
SDValue Op = getOperand(i);
@@ -9950,58 +9950,58 @@ SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
return getSplatValue(DemandedElts, UndefElements);
}
-bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
- SmallVectorImpl<SDValue> &Sequence,
- BitVector *UndefElements) const {
- unsigned NumOps = getNumOperands();
- Sequence.clear();
- if (UndefElements) {
- UndefElements->clear();
- UndefElements->resize(NumOps);
- }
- assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
- if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
- return false;
-
- // Set the undefs even if we don't find a sequence (like getSplatValue).
- if (UndefElements)
- for (unsigned I = 0; I != NumOps; ++I)
- if (DemandedElts[I] && getOperand(I).isUndef())
- (*UndefElements)[I] = true;
-
- // Iteratively widen the sequence length looking for repetitions.
- for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
- Sequence.append(SeqLen, SDValue());
- for (unsigned I = 0; I != NumOps; ++I) {
- if (!DemandedElts[I])
- continue;
- SDValue &SeqOp = Sequence[I % SeqLen];
- SDValue Op = getOperand(I);
- if (Op.isUndef()) {
- if (!SeqOp)
- SeqOp = Op;
- continue;
- }
- if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
- Sequence.clear();
- break;
- }
- SeqOp = Op;
- }
- if (!Sequence.empty())
- return true;
- }
-
- assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
- return false;
-}
-
-bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
- BitVector *UndefElements) const {
- APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
- return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
-}
-
+bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
+ SmallVectorImpl<SDValue> &Sequence,
+ BitVector *UndefElements) const {
+ unsigned NumOps = getNumOperands();
+ Sequence.clear();
+ if (UndefElements) {
+ UndefElements->clear();
+ UndefElements->resize(NumOps);
+ }
+ assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
+ if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
+ return false;
+
+ // Set the undefs even if we don't find a sequence (like getSplatValue).
+ if (UndefElements)
+ for (unsigned I = 0; I != NumOps; ++I)
+ if (DemandedElts[I] && getOperand(I).isUndef())
+ (*UndefElements)[I] = true;
+
+ // Iteratively widen the sequence length looking for repetitions.
+ for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
+ Sequence.append(SeqLen, SDValue());
+ for (unsigned I = 0; I != NumOps; ++I) {
+ if (!DemandedElts[I])
+ continue;
+ SDValue &SeqOp = Sequence[I % SeqLen];
+ SDValue Op = getOperand(I);
+ if (Op.isUndef()) {
+ if (!SeqOp)
+ SeqOp = Op;
+ continue;
+ }
+ if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
+ Sequence.clear();
+ break;
+ }
+ SeqOp = Op;
+ }
+ if (!Sequence.empty())
+ return true;
+ }
+
+ assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
+ return false;
+}
+
+bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
+ BitVector *UndefElements) const {
+ APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
+ return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
+}
+
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
BitVector *UndefElements) const {
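getRepeatedSequence, restored above, looks for the shortest power-of-two subsequence the build vector repeats, doubling the candidate length until everything matches. A simplified standalone version over plain ints, with the undef and DemandedElts handling dropped (repeatedSequence is a hypothetical helper, not the LLVM API):

#include <cassert>
#include <vector>

static bool repeatedSequence(const std::vector<int> &Ops,
                             std::vector<int> &Seq) {
  unsigned N = Ops.size();
  if (N < 2 || (N & (N - 1)) != 0) // need a power-of-2 operand count
    return false;
  for (unsigned Len = 1; Len < N; Len *= 2) {
    Seq.assign(Len, 0);
    std::vector<bool> Set(Len, false);
    bool OK = true;
    for (unsigned I = 0; I < N && OK; ++I) {
      int &S = Seq[I % Len];
      if (!Set[I % Len]) {
        S = Ops[I];         // first element seen for this slot
        Set[I % Len] = true;
      } else if (S != Ops[I])
        OK = false;         // mismatch: widen the candidate length
    }
    if (OK)
      return true;
  }
  Seq.clear(); // like the LLVM version, leave Sequence empty on failure
  return false;
}

int main() {
  std::vector<int> Seq;
  assert(repeatedSequence({1, 2, 1, 2, 1, 2, 1, 2}, Seq) && Seq.size() == 2);
  assert(!repeatedSequence({1, 2, 3, 4}, Seq));
  return 0;
}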
@@ -10074,7 +10074,7 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
-SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
+SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
if (isa<ConstantSDNode>(N))
return N.getNode();
if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
@@ -10085,15 +10085,15 @@ SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
if (GA->getOpcode() == ISD::GlobalAddress &&
TLI->isOffsetFoldingLegal(GA))
return GA;
- if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
- isa<ConstantSDNode>(N.getOperand(0)))
- return N.getNode();
+ if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
+ isa<ConstantSDNode>(N.getOperand(0)))
+ return N.getNode();
return nullptr;
}
-// Returns the SDNode if it is a constant float BuildVector
-// or constant float.
-SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
+// Returns the SDNode if it is a constant float BuildVector
+// or constant float.
+SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
if (isa<ConstantFPSDNode>(N))
return N.getNode();
@@ -10115,14 +10115,14 @@ void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
Ops[I].setUser(Node);
Ops[I].setInitial(Vals[I]);
if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
- IsDivergent |= Ops[I].getNode()->isDivergent();
+ IsDivergent |= Ops[I].getNode()->isDivergent();
}
Node->NumOperands = Vals.size();
Node->OperandList = Ops;
- if (!TLI->isSDNodeAlwaysUniform(Node)) {
- IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
+ if (!TLI->isSDNodeAlwaysUniform(Node)) {
+ IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
Node->SDNodeBits.IsDivergent = IsDivergent;
- }
+ }
checkForCycles(Node);
}
@@ -10139,44 +10139,44 @@ SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
-SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
- EVT VT, SDNodeFlags Flags) {
- switch (Opcode) {
- default:
- return SDValue();
- case ISD::ADD:
- case ISD::OR:
- case ISD::XOR:
- case ISD::UMAX:
- return getConstant(0, DL, VT);
- case ISD::MUL:
- return getConstant(1, DL, VT);
- case ISD::AND:
- case ISD::UMIN:
- return getAllOnesConstant(DL, VT);
- case ISD::SMAX:
- return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
- case ISD::SMIN:
- return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
- case ISD::FADD:
- return getConstantFP(-0.0, DL, VT);
- case ISD::FMUL:
- return getConstantFP(1.0, DL, VT);
- case ISD::FMINNUM:
- case ISD::FMAXNUM: {
- // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
- const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
- APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
- !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
- APFloat::getLargest(Semantics);
- if (Opcode == ISD::FMAXNUM)
- NeutralAF.changeSign();
-
- return getConstantFP(NeutralAF, DL, VT);
- }
- }
-}
-
+SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
+ EVT VT, SDNodeFlags Flags) {
+ switch (Opcode) {
+ default:
+ return SDValue();
+ case ISD::ADD:
+ case ISD::OR:
+ case ISD::XOR:
+ case ISD::UMAX:
+ return getConstant(0, DL, VT);
+ case ISD::MUL:
+ return getConstant(1, DL, VT);
+ case ISD::AND:
+ case ISD::UMIN:
+ return getAllOnesConstant(DL, VT);
+ case ISD::SMAX:
+ return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
+ case ISD::SMIN:
+ return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
+ case ISD::FADD:
+ return getConstantFP(-0.0, DL, VT);
+ case ISD::FMUL:
+ return getConstantFP(1.0, DL, VT);
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM: {
+ // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
+ const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
+ APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
+ !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
+ APFloat::getLargest(Semantics);
+ if (Opcode == ISD::FMAXNUM)
+ NeutralAF.changeSign();
+
+ return getConstantFP(NeutralAF, DL, VT);
+ }
+ }
+}
+
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
SmallPtrSetImpl<const SDNode*> &Visited,
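getNeutralElement, restored above, hands back the identity for each reduction opcode: a value v with x op v == x for every x. A quick standalone spot check of the integer rows and the FADD/FMUL rows (plain C++, not DAG constants):

#include <algorithm>
#include <cassert>
#include <climits>

int main() {
  int X = 1234;
  assert((X + 0) == X);              // ADD / OR / XOR / UMAX -> 0
  assert((X * 1) == X);              // MUL -> 1
  assert((X & -1) == X);             // AND / UMIN -> all ones
  assert(std::max(X, INT_MIN) == X); // SMAX -> signed minimum
  assert(std::min(X, INT_MAX) == X); // SMIN -> signed maximum
  double D = 1.5;
  assert(-0.0 + D == D);             // FADD -> -0.0
  assert(1.0 * D == D);              // FMUL -> 1.0
  return 0;
}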
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
index 20c7d771bf..1ffc833e7d 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
-#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -97,28 +97,28 @@ bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
int64_t PtrDiff;
if (NumBytes0.hasValue() && NumBytes1.hasValue() &&
BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff)) {
- // If the size of memory access is unknown, do not use it to analysis.
- // One example of unknown size memory access is to load/store scalable
- // vector objects on the stack.
+      // If the size of the memory access is unknown, do not use it in the
+      // analysis. One example of an unknown-size memory access is a
+      // load/store of scalable vector objects on the stack.
// BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
// following situations arise:
- if (PtrDiff >= 0 &&
- *NumBytes0 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
- // [----BasePtr0----]
- // [---BasePtr1--]
- // ========PtrDiff========>
- IsAlias = !(*NumBytes0 <= PtrDiff);
- return true;
- }
- if (PtrDiff < 0 &&
- *NumBytes1 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
- // [----BasePtr0----]
- // [---BasePtr1--]
- // =====(-PtrDiff)====>
- IsAlias = !((PtrDiff + *NumBytes1) <= 0);
- return true;
- }
- return false;
+ if (PtrDiff >= 0 &&
+ *NumBytes0 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
+ // [----BasePtr0----]
+ // [---BasePtr1--]
+ // ========PtrDiff========>
+ IsAlias = !(*NumBytes0 <= PtrDiff);
+ return true;
+ }
+ if (PtrDiff < 0 &&
+ *NumBytes1 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
+ // [----BasePtr0----]
+ // [---BasePtr1--]
+ // =====(-PtrDiff)====>
+ IsAlias = !((PtrDiff + *NumBytes1) <= 0);
+ return true;
+ }
+ return false;
}
// If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
// able to calculate their relative offset if at least one arises
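The two restored branches above reduce to an interval-overlap test between [0, NumBytes0) and [PtrDiff, PtrDiff + NumBytes1) once both sizes are known. A standalone check of that logic with the ASCII diagrams' cases (mayAlias is a hypothetical helper mirroring the IsAlias assignments):

#include <cassert>
#include <cstdint>

static bool mayAlias(int64_t PtrDiff, int64_t NumBytes0, int64_t NumBytes1) {
  if (PtrDiff >= 0)
    return PtrDiff < NumBytes0;   // IsAlias = !(*NumBytes0 <= PtrDiff)
  return PtrDiff + NumBytes1 > 0; // IsAlias = !((PtrDiff + *NumBytes1) <= 0)
}

int main() {
  assert(mayAlias(8, 16, 4));   // second access starts inside the first
  assert(!mayAlias(16, 16, 4)); // back-to-back accesses, no overlap
  assert(mayAlias(-2, 16, 4));  // first access starts inside the second
  assert(!mayAlias(-4, 16, 4)); // second ends exactly where first begins
  return 0;
}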
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a6bd774934..d8749cdd7c 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -404,10 +404,10 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
// elements we want.
if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
- assert((PartEVT.getVectorElementCount().getKnownMinValue() >
- ValueVT.getVectorElementCount().getKnownMinValue()) &&
- (PartEVT.getVectorElementCount().isScalable() ==
- ValueVT.getVectorElementCount().isScalable()) &&
+ assert((PartEVT.getVectorElementCount().getKnownMinValue() >
+ ValueVT.getVectorElementCount().getKnownMinValue()) &&
+ (PartEVT.getVectorElementCount().isScalable() ==
+ ValueVT.getVectorElementCount().isScalable()) &&
"Cannot narrow, it would be a lossy transformation");
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
DAG.getVectorIdxConstant(0, DL));
@@ -435,7 +435,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// are the same size, this is an obvious bitcast.
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
- } else if (ValueVT.bitsLT(PartEVT)) {
+ } else if (ValueVT.bitsLT(PartEVT)) {
// Bitcast Val back the original type and extract the corresponding
// vector we want.
unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
@@ -665,14 +665,14 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
// Promoted vector extract
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
} else {
- if (ValueVT.getVectorElementCount().isScalar()) {
+ if (ValueVT.getVectorElementCount().isScalar()) {
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
DAG.getVectorIdxConstant(0, DL));
} else {
- uint64_t ValueSize = ValueVT.getFixedSizeInBits();
- assert(PartVT.getFixedSizeInBits() > ValueSize &&
+ uint64_t ValueSize = ValueVT.getFixedSizeInBits();
+ assert(PartVT.getFixedSizeInBits() > ValueSize &&
"lossy conversion of vector to scalar type");
- EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
+ EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
Val = DAG.getBitcast(IntermediateType, Val);
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
}
@@ -705,15 +705,15 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
"Mixing scalable and fixed vectors when copying in parts");
- Optional<ElementCount> DestEltCnt;
+ Optional<ElementCount> DestEltCnt;
if (IntermediateVT.isVector())
DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
else
- DestEltCnt = ElementCount::getFixed(NumIntermediates);
+ DestEltCnt = ElementCount::getFixed(NumIntermediates);
EVT BuiltVectorTy = EVT::getVectorVT(
- *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
+ *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
if (ValueVT != BuiltVectorTy) {
if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
Val = Widened;
@@ -957,7 +957,7 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
// shouldn't try to apply any sort of splitting logic to them.
assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
"No 1:1 mapping from clobbers to regs?");
- Register SP = TLI.getStackPointerRegisterToSaveRestore();
+ Register SP = TLI.getStackPointerRegisterToSaveRestore();
(void)SP;
for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
@@ -980,14 +980,14 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
}
}
-SmallVector<std::pair<unsigned, TypeSize>, 4>
+SmallVector<std::pair<unsigned, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
- SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
+ SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
unsigned I = 0;
for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
unsigned RegCount = std::get<0>(CountAndVT);
MVT RegisterVT = std::get<1>(CountAndVT);
- TypeSize RegisterSize = RegisterVT.getSizeInBits();
+ TypeSize RegisterSize = RegisterVT.getSizeInBits();
for (unsigned E = I + RegCount; I != E; ++I)
OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
}
@@ -1141,7 +1141,7 @@ void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
if (isMatchingDbgValue(DDI))
salvageUnresolvedDbgValue(DDI);
- erase_if(DDIV, isMatchingDbgValue);
+ erase_if(DDIV, isMatchingDbgValue);
}
}
@@ -1514,9 +1514,9 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
- if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
- return getValue(Equiv->getGlobalValue());
-
+ if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
+ return getValue(Equiv->getGlobalValue());
+
VectorType *VecTy = cast<VectorType>(V->getType());
// Now that we know the number and type of the elements, get that number of
@@ -1637,32 +1637,32 @@ void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
}
}
-// In wasm EH, even though a catchpad may not catch an exception if a tag does
-// not match, it is OK to add only the first unwind destination catchpad to the
-// successors, because there will be at least one invoke instruction within the
-// catch scope that points to the next unwind destination, if one exists, so
-// CFGSort cannot mess up with BB sorting order.
-// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
-// call within them, and catchpads only consisting of 'catch (...)' have a
-// '__cxa_end_catch' call within them, both of which generate invokes in case
-// the next unwind destination exists, i.e., the next unwind destination is not
-// the caller.)
-//
-// Having at most one EH pad successor is also simpler and helps later
-// transformations.
-//
-// For example,
-// current:
-// invoke void @foo to ... unwind label %catch.dispatch
-// catch.dispatch:
-// %0 = catchswitch within ... [label %catch.start] unwind label %next
-// catch.start:
-// ...
-// ... in this BB or some other child BB dominated by this BB there will be an
-// invoke that points to 'next' BB as an unwind destination
-//
-// next: ; We don't need to add this to 'current' BB's successor
-// ...
+// In wasm EH, even though a catchpad may not catch an exception if a tag does
+// not match, it is OK to add only the first unwind destination catchpad to the
+// successors, because there will be at least one invoke instruction within the
+// catch scope that points to the next unwind destination, if one exists, so
+// CFGSort cannot mess up the BB sorting order.
+// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
+// call within them, and catchpads only consisting of 'catch (...)' have a
+// '__cxa_end_catch' call within them, both of which generate invokes in case
+// the next unwind destination exists, i.e., the next unwind destination is not
+// the caller.)
+//
+// Having at most one EH pad successor is also simpler and helps later
+// transformations.
+//
+// For example,
+// current:
+// invoke void @foo to ... unwind label %catch.dispatch
+// catch.dispatch:
+// %0 = catchswitch within ... [label %catch.start] unwind label %next
+// catch.start:
+// ...
+// ... in this BB or some other child BB dominated by this BB there will be an
+// invoke that points to 'next' BB as an unwind destination
+//
+// next: ; We don't need to add this to 'current' BB's successor
+// ...
static void findWasmUnwindDestinations(
FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
BranchProbability Prob,
@@ -1825,8 +1825,8 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
for (unsigned i = 0; i != NumValues; ++i) {
// An aggregate return value cannot wrap around the address space, so
// offsets to its parts don't wrap either.
- SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
- TypeSize::Fixed(Offsets[i]));
+ SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
+ TypeSize::Fixed(Offsets[i]));
SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
if (MemVTs[i] != ValueVTs[i])
@@ -2107,19 +2107,19 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
}
const Instruction *BOp = dyn_cast<Instruction>(Cond);
- const Value *BOpOp0, *BOpOp1;
+ const Value *BOpOp0, *BOpOp1;
// Compute the effective opcode for Cond, taking into account whether it needs
// to be inverted, e.g.
// and (not (or A, B)), C
// gets lowered as
// and (and (not A, not B), C)
- Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
+ Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
if (BOp) {
- BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
- ? Instruction::And
- : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
- ? Instruction::Or
- : (Instruction::BinaryOps)0);
+ BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
+ ? Instruction::And
+ : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
+ ? Instruction::Or
+ : (Instruction::BinaryOps)0);
if (InvertCond) {
if (BOpc == Instruction::And)
BOpc = Instruction::Or;
@@ -2129,11 +2129,11 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
}
// If this node is not part of the or/and tree, emit it as a branch.
- // Note that all nodes in the tree should have same opcode.
- bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
- if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
- !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
- !InBlock(BOpOp1, CurBB->getBasicBlock())) {
+    // Note that all nodes in the tree should have the same opcode.
+ bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
+ if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
+ !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
+ !InBlock(BOpOp1, CurBB->getBasicBlock())) {
EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
TProb, FProb, InvertCond);
return;
@@ -2169,15 +2169,15 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
auto NewTrueProb = TProb / 2;
auto NewFalseProb = TProb / 2 + FProb;
// Emit the LHS condition.
- FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
- NewFalseProb, InvertCond);
+ FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
+ NewFalseProb, InvertCond);
// Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
- Probs[1], InvertCond);
+ FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
+ Probs[1], InvertCond);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
@@ -2202,15 +2202,15 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
auto NewTrueProb = TProb + FProb / 2;
auto NewFalseProb = FProb / 2;
// Emit the LHS condition.
- FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
- NewFalseProb, InvertCond);
+ FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
+ NewFalseProb, InvertCond);
// Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
- Probs[1], InvertCond);
+ FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
+ Probs[1], InvertCond);
}
}
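
A minimal standalone sketch of the probability arithmetic used above, with plain doubles standing in for BranchProbability (which is fixed-point in LLVM); the values are illustrative:

#include <cstdio>

// Two-element version of BranchProbability::normalizeProbabilities.
static void normalize(double &P0, double &P1) {
  double Sum = P0 + P1;
  P0 /= Sum;
  P1 /= Sum;
}

int main() {
  double A = 0.6, B = 0.4; // taken/not-taken, A + B == 1
  // For "X || Y" the LHS branch gets (A/2, A/2 + B); the RHS starts from
  // (A/2, B) and is renormalized. Since A/2 + B == (1 + B)/2, that yields
  // exactly A/(1+B) and 2B/(1+B), matching the comment in the code.
  double TrueRHS = A / 2, FalseRHS = B;
  normalize(TrueRHS, FalseRHS);
  std::printf("%.4f %.4f\n", TrueRHS, FalseRHS); // 0.4286 0.5714
}
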
@@ -2287,20 +2287,20 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// je foo
// cmp D, E
// jle foo
- const Instruction *BOp = dyn_cast<Instruction>(CondVal);
- if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
- BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
- Value *Vec;
- const Value *BOp0, *BOp1;
- Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
- if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
- Opcode = Instruction::And;
- else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
- Opcode = Instruction::Or;
-
- if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
- match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
- FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
+ const Instruction *BOp = dyn_cast<Instruction>(CondVal);
+ if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
+ BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
+ Value *Vec;
+ const Value *BOp0, *BOp1;
+ Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
+ if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
+ Opcode = Instruction::And;
+ else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
+ Opcode = Instruction::Or;
+
+ if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
+ match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
+ FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
getEdgeProbability(BrMBB, Succ0MBB),
getEdgeProbability(BrMBB, Succ1MBB),
/*InvertCond=*/false);
@@ -2549,7 +2549,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
SDLoc dl = getCurSDLoc();
SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
const Module &M = *ParentBB->getParent()->getFunction().getParent();
- Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
+ Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
// Generate code to load the content of the guard slot.
SDValue GuardVal = DAG.getLoad(
@@ -2807,7 +2807,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
case Intrinsic::experimental_gc_statepoint:
LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
break;
- case Intrinsic::wasm_rethrow: {
+ case Intrinsic::wasm_rethrow: {
// This is usually done in visitTargetIntrinsic, but this intrinsic is
// special because it can be invoked, so we manually lower it to a DAG
// node here.
@@ -2815,7 +2815,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
Ops.push_back(getRoot()); // inchain
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Ops.push_back(
- DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
+ DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
TLI.getPointerTy(DAG.getDataLayout())));
SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
@@ -3012,10 +3012,10 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
}
- if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
+ if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
Flags.setExact(ExactOp->isExact());
- if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
- Flags.copyFMF(*FPOp);
+ if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
+ Flags.copyFMF(*FPOp);
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
@@ -3125,14 +3125,14 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Condition = getFCmpCondCode(predicate);
- auto *FPMO = cast<FPMathOperator>(&I);
- if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
+ auto *FPMO = cast<FPMathOperator>(&I);
+ if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
- SDNodeFlags Flags;
- Flags.copyFMF(*FPMO);
- SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
-
+ SDNodeFlags Flags;
+ Flags.copyFMF(*FPMO);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
+
EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
@@ -3162,12 +3162,12 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
bool IsUnaryAbs = false;
- bool Negate = false;
-
- SDNodeFlags Flags;
- if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
- Flags.copyFMF(*FPOp);
+ bool Negate = false;
+ SDNodeFlags Flags;
+ if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
+ Flags.copyFMF(*FPOp);
+
// Min/max matching is only viable if all output VTs are the same.
if (is_splat(ValueVTs)) {
EVT VT = ValueVTs[0];
@@ -3227,9 +3227,9 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
break;
}
break;
- case SPF_NABS:
- Negate = true;
- LLVM_FALLTHROUGH;
+ case SPF_NABS:
+ Negate = true;
+ LLVM_FALLTHROUGH;
case SPF_ABS:
IsUnaryAbs = true;
Opc = ISD::ABS;
@@ -3260,13 +3260,13 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
if (IsUnaryAbs) {
for (unsigned i = 0; i != NumValues; ++i) {
- SDLoc dl = getCurSDLoc();
- EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
+ SDLoc dl = getCurSDLoc();
+ EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
Values[i] =
- DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
- if (Negate)
- Values[i] = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
- Values[i]);
+ DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
+ if (Negate)
+ Values[i] = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
+ Values[i]);
}
} else {
for (unsigned i = 0; i != NumValues; ++i) {
@@ -3275,7 +3275,7 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
Values[i] = DAG.getNode(
OpCode, getCurSDLoc(),
- LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
+ LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
}
}
@@ -3417,7 +3417,7 @@ void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
unsigned SrcAS = SV->getType()->getPointerAddressSpace();
unsigned DestAS = I.getType()->getPointerAddressSpace();
- if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
+ if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
setValue(&I, N);
@@ -3751,12 +3751,12 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
bool IsVectorGEP = I.getType()->isVectorTy();
ElementCount VectorElementCount =
IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
- : ElementCount::getFixed(0);
+ : ElementCount::getFixed(0);
if (IsVectorGEP && !N.getValueType().isVector()) {
LLVMContext &Context = *DAG.getContext();
EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
- if (VectorElementCount.isScalable())
+ if (VectorElementCount.isScalable())
N = DAG.getSplatVector(VT, dl, N);
else
N = DAG.getSplatBuildVector(VT, dl, N);
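
The splat step above has a simple scalar reading; a small sketch for the fixed-width case (the function name is illustrative, not LLVM's):

#include <array>
#include <cstddef>
#include <cstdint>

// A scalar base pointer is broadcast to every lane so the per-lane index
// arithmetic that follows stays element-wise; scalable vectors get a
// splat node instead of a BUILD_VECTOR.
template <std::size_t NumLanes>
std::array<std::uintptr_t, NumLanes> splatBase(std::uintptr_t Base) {
  std::array<std::uintptr_t, NumLanes> Lanes;
  Lanes.fill(Base);
  return Lanes;
}
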
@@ -3829,7 +3829,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
if (!IdxN.getValueType().isVector() && IsVectorGEP) {
EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
VectorElementCount);
- if (VectorElementCount.isScalable())
+ if (VectorElementCount.isScalable())
IdxN = DAG.getSplatVector(VT, dl, IdxN);
else
IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
@@ -3870,13 +3870,13 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
}
}
- MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
- MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
- if (IsVectorGEP) {
- PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
- PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
- }
-
+ MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
+ MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
+ if (IsVectorGEP) {
+ PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
+ PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
+ }
+
if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
@@ -4173,8 +4173,8 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
Root = Chain;
ChainI = 0;
}
- SDValue Add =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(Offsets[i]), dl, Flags);
+ SDValue Add =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(Offsets[i]), dl, Flags);
SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
if (MemVTs[i] != ValueVTs[i])
Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
@@ -4336,12 +4336,12 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_UNSCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
- Ops, MMO, IndexType, false);
+ Ops, MMO, IndexType, false);
DAG.setRoot(Scatter);
setValue(&I, Scatter);
}
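
When no common base can be extracted, the operands above encode per-lane addressing as 0 + Ptr[i] * 1; a scalar sketch of what the scatter then does (names are illustrative):

#include <cstdint>

// Each active lane stores Src[i] to Base + Index[i] * Scale, which with
// Base == 0 and Scale == 1 is just the lane's own pointer.
void scatterModel(std::int32_t *Ptrs[], const std::int32_t Src[],
                  const bool Mask[], unsigned NumLanes) {
  for (unsigned I = 0; I != NumLanes; ++I)
    if (Mask[I])
      *Ptrs[I] = Src[I];
}
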
@@ -4389,7 +4389,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
// Do not serialize masked loads of constant memory with anything.
MemoryLocation ML;
if (VT.isScalableVector())
- ML = MemoryLocation::getAfter(PtrOperand);
+ ML = MemoryLocation::getAfter(PtrOperand);
else
ML = MemoryLocation(PtrOperand, LocationSize::precise(
DAG.getDataLayout().getTypeStoreSize(I.getType())),
@@ -4447,12 +4447,12 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_UNSCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
- Ops, MMO, IndexType, ISD::NON_EXTLOAD);
+ Ops, MMO, IndexType, ISD::NON_EXTLOAD);
PendingLoads.push_back(Gather.getValue(1));
setValue(&I, Gather);
@@ -4879,7 +4879,7 @@ static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI, SDNodeFlags Flags) {
+ const TargetLowering &TLI, SDNodeFlags Flags) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
@@ -4895,13 +4895,13 @@ static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
}
// No special expansion.
- return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
+ return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
}
/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI, SDNodeFlags Flags) {
+ const TargetLowering &TLI, SDNodeFlags Flags) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
if (Op.getValueType() == MVT::f32 &&
@@ -4994,13 +4994,13 @@ static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
}
// No special expansion.
- return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
+ return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
}
/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI, SDNodeFlags Flags) {
+ const TargetLowering &TLI, SDNodeFlags Flags) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
if (Op.getValueType() == MVT::f32 &&
@@ -5091,13 +5091,13 @@ static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
}
// No special expansion.
- return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
+ return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
}
/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI, SDNodeFlags Flags) {
+ const TargetLowering &TLI, SDNodeFlags Flags) {
// TODO: What fast-math-flags should be set on the floating-point nodes?
if (Op.getValueType() == MVT::f32 &&
@@ -5181,26 +5181,26 @@ static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
}
// No special expansion.
- return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
+ return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
}
/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI, SDNodeFlags Flags) {
+ const TargetLowering &TLI, SDNodeFlags Flags) {
if (Op.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
return getLimitedPrecisionExp2(Op, dl, DAG);
// No special expansion.
- return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
+ return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
}
/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
- SelectionDAG &DAG, const TargetLowering &TLI,
- SDNodeFlags Flags) {
+ SelectionDAG &DAG, const TargetLowering &TLI,
+ SDNodeFlags Flags) {
bool IsExp10 = false;
if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
@@ -5223,7 +5223,7 @@ static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
}
// No special expansion.
- return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
+ return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
}
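
All of these limited-precision expansions lean on the same trick as getLimitedPrecisionExp2: split the exponent into integer and fractional parts, approximate the fraction with a short polynomial, and fold the integer part into the float's exponent bits. A rough sketch of that shape, with placeholder coefficients (LLVM's are tuned minimax constants):

#include <cmath>
#include <cstdint>
#include <cstring>

float limitedPrecisionExp(float X) {
  float T = X * 1.4426950408889634f; // x * log2(e), so exp(x) == 2^T
  float N = std::floor(T);
  float F = T - N;                   // 2^T == 2^N * 2^F, with 0 <= F < 1
  float TwoF = 1.0f + F * (0.6931f + F * 0.2416f); // crude poly for 2^F
  std::uint32_t Bits;
  std::memcpy(&Bits, &TwoF, sizeof(Bits));
  // Adding N to the biased exponent field multiplies by 2^N.
  Bits += static_cast<std::uint32_t>(static_cast<std::int32_t>(N)) << 23;
  std::memcpy(&TwoF, &Bits, sizeof(Bits));
  return TwoF;
}
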
/// ExpandPowI - Expand a llvm.powi intrinsic.
@@ -5348,7 +5348,7 @@ static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
// getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
static void
-getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
+getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
const SDValue &N) {
switch (N.getOpcode()) {
case ISD::CopyFromReg: {
@@ -5459,7 +5459,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (FI != std::numeric_limits<int>::max())
Op = MachineOperand::CreateFI(FI);
- SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
+ SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
if (!Op && N.getNode()) {
getUnderlyingArgRegs(ArgRegsAndSizes, N);
Register Reg;
@@ -5489,8 +5489,8 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (!Op) {
// Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
- auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
- SplitRegs) {
+ auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
+ SplitRegs) {
unsigned Offset = 0;
for (auto RegAndSize : SplitRegs) {
// If the expression is already a fragment, the current register
@@ -5644,10 +5644,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
DebugLoc dl = getCurDebugLoc();
SDValue Res;
- SDNodeFlags Flags;
- if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
- Flags.copyFMF(*FPOp);
-
+ SDNodeFlags Flags;
+ if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
+ Flags.copyFMF(*FPOp);
+
switch (Intrinsic) {
default:
// By default, turn this into a target intrinsic node.
@@ -6062,26 +6062,26 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
getValue(I.getArgOperand(1)), DAG));
return;
case Intrinsic::log:
- setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
+ setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
return;
case Intrinsic::log2:
- setValue(&I,
- expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
+ setValue(&I,
+ expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
return;
case Intrinsic::log10:
- setValue(&I,
- expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
+ setValue(&I,
+ expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
return;
case Intrinsic::exp:
- setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
+ setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
return;
case Intrinsic::exp2:
- setValue(&I,
- expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
+ setValue(&I,
+ expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
return;
case Intrinsic::pow:
setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), DAG, TLI, Flags));
+ getValue(I.getArgOperand(1)), DAG, TLI, Flags));
return;
case Intrinsic::sqrt:
case Intrinsic::fabs:
@@ -6114,7 +6114,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
setValue(&I, DAG.getNode(Opcode, sdl,
getValue(I.getArgOperand(0)).getValueType(),
- getValue(I.getArgOperand(0)), Flags));
+ getValue(I.getArgOperand(0)), Flags));
return;
}
case Intrinsic::lround:
@@ -6139,47 +6139,47 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), Flags));
+ getValue(I.getArgOperand(1)), Flags));
return;
case Intrinsic::maxnum:
setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), Flags));
+ getValue(I.getArgOperand(1)), Flags));
return;
case Intrinsic::minimum:
setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), Flags));
+ getValue(I.getArgOperand(1)), Flags));
return;
case Intrinsic::maximum:
setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), Flags));
+ getValue(I.getArgOperand(1)), Flags));
return;
case Intrinsic::copysign:
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), Flags));
+ getValue(I.getArgOperand(1)), Flags));
return;
case Intrinsic::fma:
- setValue(&I, DAG.getNode(
- ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
- getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
- getValue(I.getArgOperand(2)), Flags));
+ setValue(&I, DAG.getNode(
+ ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2)), Flags));
return;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
return;
-#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
-#include "llvm/IR/VPIntrinsics.def"
- visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
- return;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#include "llvm/IR/VPIntrinsics.def"
+ visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
+ return;
case Intrinsic::fmuladd: {
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
@@ -6188,15 +6188,15 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
- getValue(I.getArgOperand(2)), Flags));
+ getValue(I.getArgOperand(2)), Flags));
} else {
// TODO: Intrinsic calls should have fast-math-flags.
- SDValue Mul = DAG.getNode(
- ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
- getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
+ SDValue Mul = DAG.getNode(
+ ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
SDValue Add = DAG.getNode(ISD::FADD, sdl,
getValue(I.getArgOperand(0)).getValueType(),
- Mul, getValue(I.getArgOperand(2)), Flags);
+ Mul, getValue(I.getArgOperand(2)), Flags);
setValue(&I, Add);
}
return;
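
In scalar terms, the two fmuladd paths differ only in rounding; a sketch:

#include <cmath>

// With FP op fusion allowed (and FMA legal or custom for the type) the
// intrinsic becomes one fused operation, rounding once; otherwise it is
// split into a multiply and an add, rounding twice.
float fmuladdFused(float A, float B, float C) { return std::fma(A, B, C); }
float fmuladdSplit(float A, float B, float C) { return A * B + C; }
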
@@ -6214,20 +6214,20 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
getValue(I.getArgOperand(0)))));
return;
- case Intrinsic::fptosi_sat: {
- EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
- SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
- setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, Type,
- getValue(I.getArgOperand(0)), SatW));
- return;
- }
- case Intrinsic::fptoui_sat: {
- EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
- SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
- setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, Type,
- getValue(I.getArgOperand(0)), SatW));
- return;
- }
+ case Intrinsic::fptosi_sat: {
+ EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
+ setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, Type,
+ getValue(I.getArgOperand(0)), SatW));
+ return;
+ }
+ case Intrinsic::fptoui_sat: {
+ EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
+ setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, Type,
+ getValue(I.getArgOperand(0)), SatW));
+ return;
+ }
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
@@ -6281,11 +6281,11 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDValue Z = getValue(I.getArgOperand(2));
EVT VT = X.getValueType();
- if (X == Y) {
- auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
- setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
- } else {
- auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
+ if (X == Y) {
+ auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
+ setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
+ } else {
+ auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
}
return;
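
A scalar model of the funnel-shift semantics, showing why X == Y collapses to a rotate and is special-cased to ISD::ROTL/ROTR above:

#include <cstdint>

// fshl takes the high word of (X:Y) shifted left by Z.
std::uint32_t fshl32(std::uint32_t X, std::uint32_t Y, std::uint32_t Z) {
  Z &= 31; // shift amount is taken modulo the bitwidth
  return Z ? (X << Z) | (Y >> (32 - Z)) : X;
}
// Feeding the same value to both halves is exactly a rotate left.
std::uint32_t rotl32(std::uint32_t X, std::uint32_t Z) {
  return fshl32(X, X, Z);
}
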
@@ -6314,18 +6314,18 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
return;
}
- case Intrinsic::sshl_sat: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
- case Intrinsic::ushl_sat: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
+ case Intrinsic::sshl_sat: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
+ case Intrinsic::ushl_sat: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
case Intrinsic::smul_fix:
case Intrinsic::umul_fix:
case Intrinsic::smul_fix_sat:
@@ -6348,36 +6348,36 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
Op1, Op2, Op3, DAG, TLI));
return;
}
- case Intrinsic::smax: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
- case Intrinsic::smin: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
- case Intrinsic::umax: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
- case Intrinsic::umin: {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
- return;
- }
- case Intrinsic::abs: {
- // TODO: Preserve "int min is poison" arg in SDAG?
- SDValue Op1 = getValue(I.getArgOperand(0));
- setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
- return;
- }
+ case Intrinsic::smax: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
+ case Intrinsic::smin: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
+ case Intrinsic::umax: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
+ case Intrinsic::umin: {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
+ return;
+ }
+ case Intrinsic::abs: {
+ // TODO: Preserve "int min is poison" arg in SDAG?
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
+ return;
+ }
case Intrinsic::stacksave: {
SDValue Op = getRoot();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
@@ -6396,7 +6396,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
// Result type for @llvm.get.dynamic.area.offset should match PtrTy for
// the target.
- if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
+ if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
" intrinsic!");
Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
@@ -6414,7 +6414,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
} else {
EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
const Value *Global = TLI.getSDagStackGuard(M);
- Align Align = DL->getPrefTypeAlign(Global->getType());
+ Align Align = DL->getPrefTypeAlign(Global->getType());
Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
MachinePointerInfo(Global, 0), Align,
MachineMemOperand::MOVolatile);
@@ -6445,10 +6445,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
// Store the stack protector onto the stack.
- Res = DAG.getStore(
- Chain, sdl, Src, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- MaybeAlign(), MachineMemOperand::MOVolatile);
+ Res = DAG.getStore(
+ Chain, sdl, Src, FIN,
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
+ MaybeAlign(), MachineMemOperand::MOVolatile);
setValue(&I, Res);
DAG.setRoot(Res);
return;
@@ -6466,13 +6466,13 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return;
-
+
case Intrinsic::assume:
- case Intrinsic::experimental_noalias_scope_decl:
+ case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::var_annotation:
case Intrinsic::sideeffect:
- // Discard annotate attributes, noalias scope declarations, assumptions, and
- // artificial side-effects.
+ // Discard annotate attributes, noalias scope declarations, assumptions, and
+ // artificial side-effects.
return;
case Intrinsic::codeview_annotation: {
@@ -6533,7 +6533,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
setValue(&I, getValue(I.getArgOperand(0)));
return;
- case Intrinsic::ubsantrap:
+ case Intrinsic::ubsantrap:
case Intrinsic::debugtrap:
case Intrinsic::trap: {
StringRef TrapFuncName =
@@ -6541,31 +6541,31 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
.getAttribute(AttributeList::FunctionIndex, "trap-func-name")
.getValueAsString();
if (TrapFuncName.empty()) {
- switch (Intrinsic) {
- case Intrinsic::trap:
- DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
- break;
- case Intrinsic::debugtrap:
- DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
- break;
- case Intrinsic::ubsantrap:
- DAG.setRoot(DAG.getNode(
- ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
- DAG.getTargetConstant(
- cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
- MVT::i32)));
- break;
- default: llvm_unreachable("unknown trap intrinsic");
- }
+ switch (Intrinsic) {
+ case Intrinsic::trap:
+ DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
+ break;
+ case Intrinsic::debugtrap:
+ DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
+ break;
+ case Intrinsic::ubsantrap:
+ DAG.setRoot(DAG.getNode(
+ ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
+ DAG.getTargetConstant(
+ cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
+ MVT::i32)));
+ break;
+ default: llvm_unreachable("unknown trap intrinsic");
+ }
return;
}
TargetLowering::ArgListTy Args;
- if (Intrinsic == Intrinsic::ubsantrap) {
- Args.push_back(TargetLoweringBase::ArgListEntry());
- Args[0].Val = I.getArgOperand(0);
- Args[0].Node = getValue(Args[0].Val);
- Args[0].Ty = Args[0].Val->getType();
- }
+ if (Intrinsic == Intrinsic::ubsantrap) {
+ Args.push_back(TargetLoweringBase::ArgListEntry());
+ Args[0].Val = I.getArgOperand(0);
+ Args[0].Node = getValue(Args[0].Val);
+ Args[0].Ty = Args[0].Val->getType();
+ }
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
@@ -6602,7 +6602,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
EVT OverflowVT = MVT::i1;
if (ResultVT.isVector())
OverflowVT = EVT::getVectorVT(
- *Context, OverflowVT, ResultVT.getVectorElementCount());
+ *Context, OverflowVT, ResultVT.getVectorElementCount());
SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
@@ -6640,7 +6640,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
Value *const ObjectPtr = I.getArgOperand(1);
SmallVector<const Value *, 4> Allocas;
- getUnderlyingObjects(ObjectPtr, Allocas);
+ getUnderlyingObjects(ObjectPtr, Allocas);
for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
@@ -6667,14 +6667,14 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
}
return;
}
- case Intrinsic::pseudoprobe: {
- auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
- auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
- auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
- Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
- DAG.setRoot(Res);
- return;
- }
+ case Intrinsic::pseudoprobe: {
+ auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
+ auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+ auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
+ Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
+ DAG.setRoot(Res);
+ return;
+ }
case Intrinsic::invariant_start:
// Discard region information.
setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
@@ -6785,7 +6785,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// specific calling convention, and only for x86_64.
// FIXME: Support other platforms later.
const auto &Triple = DAG.getTarget().getTargetTriple();
- if (Triple.getArch() != Triple::x86_64)
+ if (Triple.getArch() != Triple::x86_64)
return;
SDLoc DL = getCurSDLoc();
@@ -6816,7 +6816,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// specific calling convention, and only for x86_64.
// FIXME: Support other platforms later.
const auto &Triple = DAG.getTarget().getTargetTriple();
- if (Triple.getArch() != Triple::x86_64)
+ if (Triple.getArch() != Triple::x86_64)
return;
SDLoc DL = getCurSDLoc();
@@ -6850,19 +6850,19 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
LowerDeoptimizeCall(&I);
return;
- case Intrinsic::vector_reduce_fadd:
- case Intrinsic::vector_reduce_fmul:
- case Intrinsic::vector_reduce_add:
- case Intrinsic::vector_reduce_mul:
- case Intrinsic::vector_reduce_and:
- case Intrinsic::vector_reduce_or:
- case Intrinsic::vector_reduce_xor:
- case Intrinsic::vector_reduce_smax:
- case Intrinsic::vector_reduce_smin:
- case Intrinsic::vector_reduce_umax:
- case Intrinsic::vector_reduce_umin:
- case Intrinsic::vector_reduce_fmax:
- case Intrinsic::vector_reduce_fmin:
+ case Intrinsic::vector_reduce_fadd:
+ case Intrinsic::vector_reduce_fmul:
+ case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_reduce_umin:
+ case Intrinsic::vector_reduce_fmax:
+ case Intrinsic::vector_reduce_fmin:
visitVectorReduce(I, Intrinsic);
return;
@@ -6950,58 +6950,58 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
case Intrinsic::get_active_lane_mask: {
auto DL = getCurSDLoc();
SDValue Index = getValue(I.getOperand(0));
- SDValue TripCount = getValue(I.getOperand(1));
+ SDValue TripCount = getValue(I.getOperand(1));
Type *ElementTy = I.getOperand(0)->getType();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned VecWidth = VT.getVectorNumElements();
- SmallVector<SDValue, 16> OpsTripCount;
+ SmallVector<SDValue, 16> OpsTripCount;
SmallVector<SDValue, 16> OpsIndex;
SmallVector<SDValue, 16> OpsStepConstants;
for (unsigned i = 0; i < VecWidth; i++) {
- OpsTripCount.push_back(TripCount);
+ OpsTripCount.push_back(TripCount);
OpsIndex.push_back(Index);
- OpsStepConstants.push_back(
- DAG.getConstant(i, DL, EVT::getEVT(ElementTy)));
+ OpsStepConstants.push_back(
+ DAG.getConstant(i, DL, EVT::getEVT(ElementTy)));
}
- EVT CCVT = EVT::getVectorVT(I.getContext(), MVT::i1, VecWidth);
+ EVT CCVT = EVT::getVectorVT(I.getContext(), MVT::i1, VecWidth);
- auto VecTy = EVT::getEVT(FixedVectorType::get(ElementTy, VecWidth));
+ auto VecTy = EVT::getEVT(FixedVectorType::get(ElementTy, VecWidth));
SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
SDValue VectorInduction = DAG.getNode(
ISD::UADDO, DL, DAG.getVTList(VecTy, CCVT), VectorIndex, VectorStep);
- SDValue VectorTripCount = DAG.getBuildVector(VecTy, DL, OpsTripCount);
+ SDValue VectorTripCount = DAG.getBuildVector(VecTy, DL, OpsTripCount);
SDValue SetCC = DAG.getSetCC(DL, CCVT, VectorInduction.getValue(0),
- VectorTripCount, ISD::CondCode::SETULT);
+ VectorTripCount, ISD::CondCode::SETULT);
setValue(&I, DAG.getNode(ISD::AND, DL, CCVT,
DAG.getNOT(DL, VectorInduction.getValue(1), CCVT),
SetCC));
return;
}
- case Intrinsic::experimental_vector_insert: {
- auto DL = getCurSDLoc();
-
- SDValue Vec = getValue(I.getOperand(0));
- SDValue SubVec = getValue(I.getOperand(1));
- SDValue Index = getValue(I.getOperand(2));
- EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
- setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ResultVT, Vec, SubVec,
- Index));
- return;
- }
- case Intrinsic::experimental_vector_extract: {
- auto DL = getCurSDLoc();
-
- SDValue Vec = getValue(I.getOperand(0));
- SDValue Index = getValue(I.getOperand(1));
- EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
-
- setValue(&I, DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, Index));
- return;
- }
- }
+ case Intrinsic::experimental_vector_insert: {
+ auto DL = getCurSDLoc();
+
+ SDValue Vec = getValue(I.getOperand(0));
+ SDValue SubVec = getValue(I.getOperand(1));
+ SDValue Index = getValue(I.getOperand(2));
+ EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ResultVT, Vec, SubVec,
+ Index));
+ return;
+ }
+ case Intrinsic::experimental_vector_extract: {
+ auto DL = getCurSDLoc();
+
+ SDValue Vec = getValue(I.getOperand(0));
+ SDValue Index = getValue(I.getOperand(1));
+ EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+
+ setValue(&I, DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, Index));
+ return;
+ }
+ }
}
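
The get_active_lane_mask lowering above has a compact scalar reading; a sketch (the function name is ours):

#include <cstdint>
#include <vector>

// Lane i is active iff Index + i < TripCount; the UADDO overflow bit is
// NOTed and ANDed in so lanes whose index computation wrapped compare as
// inactive.
std::vector<bool> activeLaneMask(std::uint64_t Index, std::uint64_t TripCount,
                                 unsigned VecWidth) {
  std::vector<bool> Mask(VecWidth);
  for (unsigned I = 0; I != VecWidth; ++I) {
    std::uint64_t Lane = Index + I;   // the VectorInduction value
    bool Overflow = Lane < Index;     // the UADDO carry
    Mask[I] = !Overflow && Lane < TripCount;
  }
  return Mask;
}
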
void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
@@ -7116,41 +7116,41 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
setValue(&FPI, FPResult);
}
-static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
- Optional<unsigned> ResOPC;
- switch (VPIntrin.getIntrinsicID()) {
-#define BEGIN_REGISTER_VP_INTRINSIC(INTRIN, ...) case Intrinsic::INTRIN:
-#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) ResOPC = ISD::VPSDID;
-#define END_REGISTER_VP_INTRINSIC(...) break;
-#include "llvm/IR/VPIntrinsics.def"
- }
-
- if (!ResOPC.hasValue())
- llvm_unreachable(
- "Inconsistency: no SDNode available for this VPIntrinsic!");
-
- return ResOPC.getValue();
-}
-
-void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
- const VPIntrinsic &VPIntrin) {
- unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
-
- SmallVector<EVT, 4> ValueVTs;
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
- SDVTList VTs = DAG.getVTList(ValueVTs);
-
- // Request operands.
- SmallVector<SDValue, 7> OpValues;
- for (int i = 0; i < (int)VPIntrin.getNumArgOperands(); ++i)
- OpValues.push_back(getValue(VPIntrin.getArgOperand(i)));
-
- SDLoc DL = getCurSDLoc();
- SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues);
- setValue(&VPIntrin, Result);
-}
-
+static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
+ Optional<unsigned> ResOPC;
+ switch (VPIntrin.getIntrinsicID()) {
+#define BEGIN_REGISTER_VP_INTRINSIC(INTRIN, ...) case Intrinsic::INTRIN:
+#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) ResOPC = ISD::VPSDID;
+#define END_REGISTER_VP_INTRINSIC(...) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+
+ if (!ResOPC.hasValue())
+ llvm_unreachable(
+ "Inconsistency: no SDNode available for this VPIntrinsic!");
+
+ return ResOPC.getValue();
+}
+
+void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
+ const VPIntrinsic &VPIntrin) {
+ unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
+
+ SmallVector<EVT, 4> ValueVTs;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
+ SDVTList VTs = DAG.getVTList(ValueVTs);
+
+ // Request operands.
+ SmallVector<SDValue, 7> OpValues;
+ for (int i = 0; i < (int)VPIntrin.getNumArgOperands(); ++i)
+ OpValues.push_back(getValue(VPIntrin.getArgOperand(i)));
+
+ SDLoc DL = getCurSDLoc();
+ SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues);
+ setValue(&VPIntrin, Result);
+}
+
std::pair<SDValue, SDValue>
SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
const BasicBlock *EHPadBB) {
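
getISDForVPIntrinsic above is stitched together from VPIntrinsics.def with X-macros; a toy version of the same pattern, with invented table entries and opcode numbers:

#include <cassert>

#define VP_TABLE(X) X(VP_ADD, 100) X(VP_SUB, 101)

enum ToyVPIntrinsic {
#define DECL(NAME, SDID) NAME,
  VP_TABLE(DECL)
#undef DECL
};

int toyISDForVP(ToyVPIntrinsic ID) {
  int ResOPC = -1;
  switch (ID) {
#define CASE(NAME, SDID)                                                      \
  case NAME:                                                                  \
    ResOPC = SDID;                                                            \
    break;
    VP_TABLE(CASE)
#undef CASE
  }
  assert(ResOPC != -1 && "no SDNode for this VP intrinsic");
  return ResOPC;
}
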
@@ -7367,9 +7367,9 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
}
SDValue Ptr = Builder.getValue(PtrVal);
- SDValue LoadVal =
- Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
- MachinePointerInfo(PtrVal), Align(1));
+ SDValue LoadVal =
+ Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
+ MachinePointerInfo(PtrVal), Align(1));
if (!ConstantMemory)
Builder.PendingLoads.push_back(LoadVal.getValue(1));
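
For a small constant size, the memcmp/bcmp fast path built on getMemCmpLoad reduces to one wide unaligned load per side plus an integer compare; a sketch of the 8-byte equality case:

#include <cstdint>
#include <cstring>

// Both sides are loaded as one integer (Align(1) in the DAG; memcpy
// here) and compared. Only equality/inequality can be answered this way,
// since an ordered result would need byte-order fixups.
bool bytesEqual8(const void *A, const void *B) {
  std::uint64_t X, Y;
  std::memcpy(&X, A, sizeof(X));
  std::memcpy(&Y, B, sizeof(Y));
  return X == Y;
}
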
@@ -7390,12 +7390,12 @@ void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
setValue(&I, Value);
}
-/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
+/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
-bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
+bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
const Value *Size = I.getArgOperand(2);
const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
@@ -7646,12 +7646,12 @@ bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
if (!I.onlyReadsMemory())
return false;
- SDNodeFlags Flags;
- Flags.copyFMF(cast<FPMathOperator>(I));
-
+ SDNodeFlags Flags;
+ Flags.copyFMF(cast<FPMathOperator>(I));
+
SDValue Tmp = getValue(I.getArgOperand(0));
- setValue(&I,
- DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
+ setValue(&I,
+ DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
return true;
}
@@ -7666,13 +7666,13 @@ bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
if (!I.onlyReadsMemory())
return false;
- SDNodeFlags Flags;
- Flags.copyFMF(cast<FPMathOperator>(I));
-
+ SDNodeFlags Flags;
+ Flags.copyFMF(cast<FPMathOperator>(I));
+
SDValue Tmp0 = getValue(I.getArgOperand(0));
SDValue Tmp1 = getValue(I.getArgOperand(1));
EVT VT = Tmp0.getValueType();
- setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
+ setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
return true;
}
@@ -7706,10 +7706,10 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
LibInfo->hasOptimizedCodeGen(Func)) {
switch (Func) {
default: break;
- case LibFunc_bcmp:
- if (visitMemCmpBCmpCall(I))
- return;
- break;
+ case LibFunc_bcmp:
+ if (visitMemCmpBCmpCall(I))
+ return;
+ break;
case LibFunc_copysign:
case LibFunc_copysignf:
case LibFunc_copysignl:
@@ -7811,7 +7811,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
return;
break;
case LibFunc_memcmp:
- if (visitMemCmpBCmpCall(I))
+ if (visitMemCmpBCmpCall(I))
return;
break;
case LibFunc_mempcpy:
@@ -8231,9 +8231,9 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- EVT VT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
- DAG.getDataLayout());
- OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
+ EVT VT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
+ DAG.getDataLayout());
+ OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
} else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
// The return value of the call is this value. As such, there is no
// corresponding argument.
@@ -8495,7 +8495,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
AsmNodeOperands.push_back(DAG.getTargetConstant(
ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
- llvm::append_range(AsmNodeOperands, Ops);
+ llvm::append_range(AsmNodeOperands, Ops);
break;
}
@@ -9075,59 +9075,59 @@ void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
SDLoc dl = getCurSDLoc();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
SDValue Res;
- SDNodeFlags SDFlags;
- if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
- SDFlags.copyFMF(*FPMO);
+ SDNodeFlags SDFlags;
+ if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
+ SDFlags.copyFMF(*FPMO);
switch (Intrinsic) {
- case Intrinsic::vector_reduce_fadd:
- if (SDFlags.hasAllowReassociation())
+ case Intrinsic::vector_reduce_fadd:
+ if (SDFlags.hasAllowReassociation())
Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
- DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
- SDFlags);
+ DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
+ SDFlags);
else
- Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
+ Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
break;
- case Intrinsic::vector_reduce_fmul:
- if (SDFlags.hasAllowReassociation())
+ case Intrinsic::vector_reduce_fmul:
+ if (SDFlags.hasAllowReassociation())
Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
- DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
- SDFlags);
+ DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
+ SDFlags);
else
- Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
+ Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
break;
- case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_add:
Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_mul:
Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_and:
Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_or:
Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_xor:
Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_smax:
Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_smin:
Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_reduce_umax:
Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_umin:
+ case Intrinsic::vector_reduce_umin:
Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
break;
- case Intrinsic::vector_reduce_fmax:
- Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
+ case Intrinsic::vector_reduce_fmax:
+ Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
break;
- case Intrinsic::vector_reduce_fmin:
- Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
+ case Intrinsic::vector_reduce_fmin:
+ Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
break;
default:
llvm_unreachable("Unhandled vector reduce intrinsic");
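
The fadd/fmul reduction split above is about evaluation order; scalar models of the two forms:

#include <numeric>
#include <vector>

// VECREDUCE_SEQ_FADD folds the start value in strictly left to right;
// with reassociation allowed, the vector may be reduced in any order
// first (a plain accumulate here) and the start value added last.
float reduceSeqFAdd(float Start, const std::vector<float> &V) {
  float Acc = Start;
  for (float X : V)
    Acc += X; // ((Start + v0) + v1) + ...
  return Acc;
}
float reduceFastFAdd(float Start, const std::vector<float> &V) {
  return Start + std::accumulate(V.begin(), V.end(), 0.0f);
}
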
@@ -9214,7 +9214,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Entry.IsSRet = true;
Entry.IsNest = false;
Entry.IsByVal = false;
- Entry.IsByRef = false;
+ Entry.IsByRef = false;
Entry.IsReturned = false;
Entry.IsSwiftSelf = false;
Entry.IsSwiftError = false;
@@ -9335,8 +9335,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setCFGuardTarget();
if (Args[i].IsByVal)
Flags.setByVal();
- if (Args[i].IsByRef)
- Flags.setByRef();
+ if (Args[i].IsByRef)
+ Flags.setByRef();
if (Args[i].IsPreallocated) {
Flags.setPreallocated();
// Set the byval flag for CCAssignFn callbacks that don't know about
@@ -9542,33 +9542,33 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
return std::make_pair(Res, CLI.Chain);
}
-/// Places new result values for the node in Results (their number
-/// and types must exactly match those of the original return values of
-/// the node), or leaves Results empty, which indicates that the node is not
-/// to be custom lowered after all.
+/// Places new result values for the node in Results (their number
+/// and types must exactly match those of the original return values of
+/// the node), or leaves Results empty, which indicates that the node is not
+/// to be custom lowered after all.
void TargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
- SDValue Res = LowerOperation(SDValue(N, 0), DAG);
-
- if (!Res.getNode())
- return;
-
- // If the original node has one result, take the return value from
- // LowerOperation as is. It might not be result number 0.
- if (N->getNumValues() == 1) {
+ SDValue Res = LowerOperation(SDValue(N, 0), DAG);
+
+ if (!Res.getNode())
+ return;
+
+ // If the original node has one result, take the return value from
+ // LowerOperation as is. It might not be result number 0.
+ if (N->getNumValues() == 1) {
Results.push_back(Res);
- return;
- }
-
- // If the original node has multiple results, then the return node should
- // have the same number of results.
- assert((N->getNumValues() == Res->getNumValues()) &&
- "Lowering returned the wrong number of results!");
-
- // Places new result values based on N's result numbers.
- for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
- Results.push_back(Res.getValue(I));
+ return;
+ }
+
+ // If the original node has multiple results, then the return node should
+ // have the same number of results.
+ assert((N->getNumValues() == Res->getNumValues()) &&
+ "Lowering returned the wrong number of results!");
+
+ // Places new result values based on N's result numbers.
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+ Results.push_back(Res.getValue(I));
}
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -9660,9 +9660,9 @@ findArgumentCopyElisionCandidates(const DataLayout &DL,
// We will look through cast uses, so ignore them completely.
if (I.isCast())
continue;
- // Ignore debug info and pseudo op intrinsics; they don't escape or store
- // to allocas.
- if (I.isDebugOrPseudoInst())
+ // Ignore debug info and pseudo op intrinsics; they don't escape or store
+ // to allocas.
+ if (I.isDebugOrPseudoInst())
continue;
// This is an unknown instruction. Assume it escapes or writes to all
// static alloca operands.
@@ -9692,7 +9692,7 @@ findArgumentCopyElisionCandidates(const DataLayout &DL,
// initializes the alloca. Don't elide copies from the same argument twice.
const Value *Val = SI->getValueOperand()->stripPointerCasts();
const auto *Arg = dyn_cast<Argument>(Val);
- if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
+ if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
Arg->getType()->isEmptyTy() ||
DL.getTypeStoreSize(Arg->getType()) !=
DL.getTypeAllocSize(AI->getAllocatedType()) ||
@@ -9873,8 +9873,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Flags.setSwiftError();
if (Arg.hasAttribute(Attribute::ByVal))
Flags.setByVal();
- if (Arg.hasAttribute(Attribute::ByRef))
- Flags.setByRef();
+ if (Arg.hasAttribute(Attribute::ByRef))
+ Flags.setByRef();
if (Arg.hasAttribute(Attribute::InAlloca)) {
Flags.setInAlloca();
// Set the byval flag for CCAssignFn callbacks that don't know about
@@ -9894,30 +9894,30 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Flags.setByVal();
}
- Type *ArgMemTy = nullptr;
- if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
- Flags.isByRef()) {
- if (!ArgMemTy)
- ArgMemTy = Arg.getPointeeInMemoryValueType();
-
- uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
-
- // For in-memory arguments, size and alignment should be passed from FE.
- // BE will guess if this info is not there, but there are cases it cannot
- // get right.
- MaybeAlign MemAlign = Arg.getParamAlign();
- if (!MemAlign)
- MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
-
- if (Flags.isByRef()) {
- Flags.setByRefSize(MemSize);
- Flags.setByRefAlign(*MemAlign);
- } else {
- Flags.setByValSize(MemSize);
- Flags.setByValAlign(*MemAlign);
- }
+ Type *ArgMemTy = nullptr;
+ if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
+ Flags.isByRef()) {
+ if (!ArgMemTy)
+ ArgMemTy = Arg.getPointeeInMemoryValueType();
+
+ uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
+
+ // For in-memory arguments, size and alignment should be passed from FE.
+ // BE will guess if this info is not there, but there are cases it cannot
+ // get right.
+ MaybeAlign MemAlign = Arg.getParamAlign();
+ if (!MemAlign)
+ MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
+
+ if (Flags.isByRef()) {
+ Flags.setByRefSize(MemSize);
+ Flags.setByRefAlign(*MemAlign);
+ } else {
+ Flags.setByValSize(MemSize);
+ Flags.setByValAlign(*MemAlign);
+ }
}
-
+
if (Arg.hasAttribute(Attribute::Nest))
Flags.setNest();
if (NeedsRegBlock)
@@ -10794,7 +10794,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
{PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
while (!WorkList.empty()) {
- SwitchWorkListItem W = WorkList.pop_back_val();
+ SwitchWorkListItem W = WorkList.pop_back_val();
unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 8f6e98c401..97f09350cd 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -37,7 +37,7 @@
namespace llvm {
-class AAResults;
+class AAResults;
class AllocaInst;
class AtomicCmpXchgInst;
class AtomicRMWInst;
@@ -62,7 +62,7 @@ class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
-class GCStatepointInst;
+class GCStatepointInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
@@ -388,7 +388,7 @@ public:
SelectionDAG &DAG;
const DataLayout *DL = nullptr;
- AAResults *AA = nullptr;
+ AAResults *AA = nullptr;
const TargetLibraryInfo *LibInfo;
class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
@@ -442,7 +442,7 @@ public:
SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)), FuncInfo(funcinfo),
SwiftError(swifterror) {}
- void init(GCFunctionInfo *gfi, AAResults *AA,
+ void init(GCFunctionInfo *gfi, AAResults *AA,
const TargetLibraryInfo *li);
/// Clear out the current SelectionDAG and the associated state and prepare
@@ -685,7 +685,7 @@ private:
void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
- void visitFSub(const User &I) { visitBinary(I, ISD::FSUB); }
+ void visitFSub(const User &I) { visitBinary(I, ISD::FSUB); }
void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
@@ -740,7 +740,7 @@ private:
void visitFence(const FenceInst &I);
void visitPHI(const PHINode &I);
void visitCall(const CallInst &I);
- bool visitMemCmpBCmpCall(const CallInst &I);
+ bool visitMemCmpBCmpCall(const CallInst &I);
bool visitMemPCpyCall(const CallInst &I);
bool visitMemChrCall(const CallInst &I);
bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
@@ -759,7 +759,7 @@ private:
void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
- void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);
+ void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);
void visitVAStart(const CallInst &I);
void visitVAArg(const VAArgInst &I);
@@ -896,7 +896,7 @@ struct RegsForValue {
}
/// Return a list of registers and their sizes.
- SmallVector<std::pair<unsigned, TypeSize>, 4> getRegsAndSizes() const;
+ SmallVector<std::pair<unsigned, TypeSize>, 4> getRegsAndSizes() const;
};
} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index d867f3e09e..8e0502abb3 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -293,7 +293,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::ADDC: return "addc";
case ISD::ADDE: return "adde";
case ISD::ADDCARRY: return "addcarry";
- case ISD::SADDO_CARRY: return "saddo_carry";
+ case ISD::SADDO_CARRY: return "saddo_carry";
case ISD::SADDO: return "saddo";
case ISD::UADDO: return "uaddo";
case ISD::SSUBO: return "ssubo";
@@ -303,7 +303,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::SUBC: return "subc";
case ISD::SUBE: return "sube";
case ISD::SUBCARRY: return "subcarry";
- case ISD::SSUBO_CARRY: return "ssubo_carry";
+ case ISD::SSUBO_CARRY: return "ssubo_carry";
case ISD::SHL_PARTS: return "shl_parts";
case ISD::SRA_PARTS: return "sra_parts";
case ISD::SRL_PARTS: return "srl_parts";
@@ -312,8 +312,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::UADDSAT: return "uaddsat";
case ISD::SSUBSAT: return "ssubsat";
case ISD::USUBSAT: return "usubsat";
- case ISD::SSHLSAT: return "sshlsat";
- case ISD::USHLSAT: return "ushlsat";
+ case ISD::SSHLSAT: return "sshlsat";
+ case ISD::USHLSAT: return "ushlsat";
case ISD::SMULFIX: return "smulfix";
case ISD::SMULFIXSAT: return "smulfixsat";
@@ -348,8 +348,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::STRICT_FP_TO_SINT: return "strict_fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::STRICT_FP_TO_UINT: return "strict_fp_to_uint";
- case ISD::FP_TO_SINT_SAT: return "fp_to_sint_sat";
- case ISD::FP_TO_UINT_SAT: return "fp_to_uint_sat";
+ case ISD::FP_TO_SINT_SAT: return "fp_to_sint_sat";
+ case ISD::FP_TO_UINT_SAT: return "fp_to_uint_sat";
case ISD::BITCAST: return "bitcast";
case ISD::ADDRSPACECAST: return "addrspacecast";
case ISD::FP16_TO_FP: return "fp16_to_fp";
@@ -396,11 +396,11 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::STACKRESTORE: return "stackrestore";
case ISD::TRAP: return "trap";
case ISD::DEBUGTRAP: return "debugtrap";
- case ISD::UBSANTRAP: return "ubsantrap";
+ case ISD::UBSANTRAP: return "ubsantrap";
case ISD::LIFETIME_START: return "lifetime.start";
case ISD::LIFETIME_END: return "lifetime.end";
- case ISD::PSEUDO_PROBE:
- return "pseudoprobe";
+ case ISD::PSEUDO_PROBE:
+ return "pseudoprobe";
case ISD::GC_TRANSITION_START: return "gc_transition.start";
case ISD::GC_TRANSITION_END: return "gc_transition.end";
case ISD::GET_DYNAMIC_AREA_OFFSET: return "get.dynamic.area.offset";
@@ -419,7 +419,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::CTTZ_ZERO_UNDEF: return "cttz_zero_undef";
case ISD::CTLZ: return "ctlz";
case ISD::CTLZ_ZERO_UNDEF: return "ctlz_zero_undef";
- case ISD::PARITY: return "parity";
+ case ISD::PARITY: return "parity";
// Trampolines
case ISD::INIT_TRAMPOLINE: return "init_trampoline";
@@ -457,9 +457,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::SETFALSE2: return "setfalse2";
}
case ISD::VECREDUCE_FADD: return "vecreduce_fadd";
- case ISD::VECREDUCE_SEQ_FADD: return "vecreduce_seq_fadd";
+ case ISD::VECREDUCE_SEQ_FADD: return "vecreduce_seq_fadd";
case ISD::VECREDUCE_FMUL: return "vecreduce_fmul";
- case ISD::VECREDUCE_SEQ_FMUL: return "vecreduce_seq_fmul";
+ case ISD::VECREDUCE_SEQ_FMUL: return "vecreduce_seq_fmul";
case ISD::VECREDUCE_ADD: return "vecreduce_add";
case ISD::VECREDUCE_MUL: return "vecreduce_mul";
case ISD::VECREDUCE_AND: return "vecreduce_and";
@@ -471,12 +471,12 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::VECREDUCE_UMIN: return "vecreduce_umin";
case ISD::VECREDUCE_FMAX: return "vecreduce_fmax";
case ISD::VECREDUCE_FMIN: return "vecreduce_fmin";
-
- // Vector Predication
-#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
- case ISD::SDID: \
- return #NAME;
-#include "llvm/IR/VPIntrinsics.def"
+
+ // Vector Predication
+#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
+ case ISD::SDID: \
+ return #NAME;
+#include "llvm/IR/VPIntrinsics.def"
}
}
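The BEGIN_REGISTER_VP_SDNODE block above is the X-macro pattern: llvm/IR/VPIntrinsics.def is a list of macro invocations, and each includer defines the macro to extract the fields it needs, here one case label per vector-predication node. A self-contained toy of the pattern, with a hypothetical two-entry list standing in for the .def file:

#include <string>

enum class ToyOp { VP_ADD, VP_MUL };

std::string toyOpName(ToyOp Op) {
  switch (Op) {
  // Each "registration" expands to one case label returning the name.
#define TOY_VP_NODE(SDID, NAME)                                              \
  case ToyOp::SDID:                                                          \
    return #NAME;
    TOY_VP_NODE(VP_ADD, vp_add)
    TOY_VP_NODE(VP_MUL, vp_mul)
#undef TOY_VP_NODE
  }
  return "unknown";
}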
@@ -746,39 +746,39 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << ", compressing";
OS << ">";
- } else if (const auto *MGather = dyn_cast<MaskedGatherSDNode>(this)) {
- OS << "<";
- printMemOperand(OS, *MGather->getMemOperand(), G);
-
- bool doExt = true;
- switch (MGather->getExtensionType()) {
- default: doExt = false; break;
- case ISD::EXTLOAD: OS << ", anyext"; break;
- case ISD::SEXTLOAD: OS << ", sext"; break;
- case ISD::ZEXTLOAD: OS << ", zext"; break;
- }
- if (doExt)
- OS << " from " << MGather->getMemoryVT().getEVTString();
-
- auto Signed = MGather->isIndexSigned() ? "signed" : "unsigned";
- auto Scaled = MGather->isIndexScaled() ? "scaled" : "unscaled";
- OS << ", " << Signed << " " << Scaled << " offset";
-
- OS << ">";
- } else if (const auto *MScatter = dyn_cast<MaskedScatterSDNode>(this)) {
- OS << "<";
- printMemOperand(OS, *MScatter->getMemOperand(), G);
-
- if (MScatter->isTruncatingStore())
- OS << ", trunc to " << MScatter->getMemoryVT().getEVTString();
-
- auto Signed = MScatter->isIndexSigned() ? "signed" : "unsigned";
- auto Scaled = MScatter->isIndexScaled() ? "scaled" : "unscaled";
- OS << ", " << Signed << " " << Scaled << " offset";
-
- OS << ">";
- } else if (const MemSDNode *M = dyn_cast<MemSDNode>(this)) {
+ } else if (const auto *MGather = dyn_cast<MaskedGatherSDNode>(this)) {
OS << "<";
+ printMemOperand(OS, *MGather->getMemOperand(), G);
+
+ bool doExt = true;
+ switch (MGather->getExtensionType()) {
+ default: doExt = false; break;
+ case ISD::EXTLOAD: OS << ", anyext"; break;
+ case ISD::SEXTLOAD: OS << ", sext"; break;
+ case ISD::ZEXTLOAD: OS << ", zext"; break;
+ }
+ if (doExt)
+ OS << " from " << MGather->getMemoryVT().getEVTString();
+
+ auto Signed = MGather->isIndexSigned() ? "signed" : "unsigned";
+ auto Scaled = MGather->isIndexScaled() ? "scaled" : "unscaled";
+ OS << ", " << Signed << " " << Scaled << " offset";
+
+ OS << ">";
+ } else if (const auto *MScatter = dyn_cast<MaskedScatterSDNode>(this)) {
+ OS << "<";
+ printMemOperand(OS, *MScatter->getMemOperand(), G);
+
+ if (MScatter->isTruncatingStore())
+ OS << ", trunc to " << MScatter->getMemoryVT().getEVTString();
+
+ auto Signed = MScatter->isIndexSigned() ? "signed" : "unsigned";
+ auto Scaled = MScatter->isIndexScaled() ? "scaled" : "unscaled";
+ OS << ", " << Signed << " " << Scaled << " offset";
+
+ OS << ">";
+ } else if (const MemSDNode *M = dyn_cast<MemSDNode>(this)) {
+ OS << "<";
printMemOperand(OS, *M->getMemOperand(), G);
OS << ">";
} else if (const BlockAddressSDNode *BA =
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index d17dd1c5ec..0bb4a5d06e 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -75,7 +75,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
@@ -779,11 +779,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
<< "'\n";
CurDAG->dump());
-#ifndef NDEBUG
- if (TTI.hasBranchDivergence())
- CurDAG->VerifyDAGDiverence();
-#endif
-
+#ifndef NDEBUG
+ if (TTI.hasBranchDivergence())
+ CurDAG->VerifyDAGDiverence();
+#endif
+
if (ViewDAGCombine1 && MatchFilterBB)
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
@@ -794,11 +794,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
}
- LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
-
+ LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
+
#ifndef NDEBUG
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -816,11 +816,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
Changed = CurDAG->LegalizeTypes();
}
- LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
-
+ LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
+
#ifndef NDEBUG
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -840,11 +840,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
}
- LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
-
+ LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
+
#ifndef NDEBUG
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -863,11 +863,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
<< "'\n";
CurDAG->dump());
-#ifndef NDEBUG
- if (TTI.hasBranchDivergence())
- CurDAG->VerifyDAGDiverence();
-#endif
-
+#ifndef NDEBUG
+ if (TTI.hasBranchDivergence())
+ CurDAG->VerifyDAGDiverence();
+#endif
+
{
NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
GroupDescription, TimePassesIsEnabled);
@@ -879,11 +879,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
<< "'\n";
CurDAG->dump());
-#ifndef NDEBUG
- if (TTI.hasBranchDivergence())
- CurDAG->VerifyDAGDiverence();
-#endif
-
+#ifndef NDEBUG
+ if (TTI.hasBranchDivergence())
+ CurDAG->VerifyDAGDiverence();
+#endif
+
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
@@ -914,11 +914,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Legalize();
}
- LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
-
+ LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
+
#ifndef NDEBUG
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -934,11 +934,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
}
- LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
-
+ LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
+
#ifndef NDEBUG
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -1267,12 +1267,12 @@ bool SelectionDAGISel::PrepareEHLandingPad() {
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
- // If the unwinder does not preserve all registers, ensure that the
- // function marks the clobbered registers as used.
- const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
- if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
- MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
-
+ // If the unwinder does not preserve all registers, ensure that the
+ // function marks the clobbered registers as used.
+ const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
+ if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
+ MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
+
if (Pers == EHPersonality::Wasm_CXX) {
if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
mapWasmLandingPadIndex(MBB, CPI);
@@ -1691,8 +1691,8 @@ static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
static MachineBasicBlock::iterator
-FindSplitPointForStackProtector(MachineBasicBlock *BB,
- const TargetInstrInfo &TII) {
+FindSplitPointForStackProtector(MachineBasicBlock *BB,
+ const TargetInstrInfo &TII) {
MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
if (SplitPoint == BB->begin())
return SplitPoint;
@@ -1701,31 +1701,31 @@ FindSplitPointForStackProtector(MachineBasicBlock *BB,
MachineBasicBlock::iterator Previous = SplitPoint;
--Previous;
- if (TII.isTailCall(*SplitPoint) &&
- Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
- // Call frames cannot be nested, so if this frame is describing the tail
- // call itself, then we must insert before the sequence even starts. For
- // example:
- // <split point>
- // ADJCALLSTACKDOWN ...
- // <Moves>
- // ADJCALLSTACKUP ...
- // TAILJMP somewhere
- // On the other hand, it could be an unrelated call in which case this tail call
- // has to register moves of its own and should be the split point. For example:
- // ADJCALLSTACKDOWN
- // CALL something_else
- // ADJCALLSTACKUP
- // <split point>
- // TAILJMP somewhere
- do {
- --Previous;
- if (Previous->isCall())
- return SplitPoint;
- } while(Previous->getOpcode() != TII.getCallFrameSetupOpcode());
-
- return Previous;
- }
-
+ if (TII.isTailCall(*SplitPoint) &&
+ Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
+ // Call frames cannot be nested, so if this frame is describing the tail
+ // call itself, then we must insert before the sequence even starts. For
+ // example:
+ // <split point>
+ // ADJCALLSTACKDOWN ...
+ // <Moves>
+ // ADJCALLSTACKUP ...
+ // TAILJMP somewhere
+ // On the other hand, it could be an unrelated call in which case this tail call
+ // has to register moves of its own and should be the split point. For example:
+ // ADJCALLSTACKDOWN
+ // CALL something_else
+ // ADJCALLSTACKUP
+ // <split point>
+ // TAILJMP somewhere
+ do {
+ --Previous;
+ if (Previous->isCall())
+ return SplitPoint;
+ } while(Previous->getOpcode() != TII.getCallFrameSetupOpcode());
+
+ return Previous;
+ }
+
while (MIIsInTerminatorSequence(*Previous)) {
SplitPoint = Previous;
if (Previous == Start)
@@ -1765,7 +1765,7 @@ SelectionDAGISel::FinishBasicBlock() {
// Add load and check to the basicblock.
FuncInfo->MBB = ParentMBB;
FuncInfo->InsertPt =
- FindSplitPointForStackProtector(ParentMBB, *TII);
+ FindSplitPointForStackProtector(ParentMBB, *TII);
SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
@@ -1784,7 +1784,7 @@ SelectionDAGISel::FinishBasicBlock() {
// register allocation issues caused by us splitting the parent mbb. The
// register allocator will clean up said virtual copies later on.
MachineBasicBlock::iterator SplitPoint =
- FindSplitPointForStackProtector(ParentMBB, *TII);
+ FindSplitPointForStackProtector(ParentMBB, *TII);
// Splice the terminator of ParentMBB into SuccessMBB.
SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
@@ -2119,7 +2119,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
- llvm::append_range(Ops, SelOps);
+ llvm::append_range(Ops, SelOps);
i += 2;
}
}
@@ -2319,7 +2319,7 @@ void SelectionDAGISel::Select_FREEZE(SDNode *N) {
}
/// GetVBR - decode a vbr encoding whose top bit is set.
-LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
+LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
assert(Val >= 128 && "Not a VBR");
Val &= 127; // Remove first vbr bit.
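The hunk cuts off before the loop body of GetVBR. A hedged sketch of the remaining decode, assuming the standard VBR scheme the comment implies (7 payload bits per byte, a set top bit means another byte follows):

#include <cassert>
#include <cstdint>

static uint64_t decodeVBR(uint64_t Val, const unsigned char *Table,
                          unsigned &Idx) {
  assert(Val >= 128 && "Not a VBR");
  Val &= 127; // strip the continuation bit of the already-consumed byte 0
  unsigned Shift = 7;
  uint64_t Next;
  do {
    Next = Table[Idx++];
    Val |= (Next & 127) << Shift; // splice in the next 7 payload bits
    Shift += 7;
  } while (Next & 128); // continuation bit set: more bytes follow
  return Val;
}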
@@ -2378,7 +2378,7 @@ void SelectionDAGISel::UpdateChains(
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
- !llvm::is_contained(NowDeadNodes, ChainNode))
+ !llvm::is_contained(NowDeadNodes, ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
@@ -2516,9 +2516,9 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
}
/// CheckSame - Implements OP_CheckSame.
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
-CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
- const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
+ const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
// Accept if it is exactly the same as a previously recorded node.
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
@@ -2526,10 +2526,10 @@ CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
}
/// CheckChildSame - Implements OP_CheckChildXSame.
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
- const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
- const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
- unsigned ChildNo) {
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
+ const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
+ const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
+ unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
@@ -2537,20 +2537,20 @@ LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
}
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel) {
return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
}
/// CheckNodePredicate - Implements OP_CheckNodePredicate.
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
const SelectionDAGISel &SDISel, SDNode *N) {
return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDNode *N) {
uint16_t Opc = MatcherTable[MatcherIndex++];
@@ -2558,7 +2558,7 @@ CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return N->getOpcode() == Opc;
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
@@ -2568,7 +2568,7 @@ CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL,
unsigned ChildNo) {
@@ -2578,14 +2578,14 @@ CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
DL);
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
return cast<CondCodeSDNode>(N)->get() ==
(ISD::CondCode)MatcherTable[MatcherIndex++];
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
if (2 >= N.getNumOperands())
@@ -2593,7 +2593,7 @@ CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return ::CheckCondCode(MatcherTable, MatcherIndex, N.getOperand(2));
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
@@ -2604,7 +2604,7 @@ CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N) {
int64_t Val = MatcherTable[MatcherIndex++];
@@ -2615,7 +2615,7 @@ CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return C && C->getSExtValue() == Val;
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
@@ -2623,7 +2623,7 @@ CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
@@ -2636,9 +2636,9 @@ CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
-LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
-CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
- const SelectionDAGISel &SDISel) {
+LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
+CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
+ const SelectionDAGISel &SDISel) {
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
@@ -2831,7 +2831,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
case ISD::ANNOTATION_LABEL:
case ISD::LIFETIME_START:
case ISD::LIFETIME_END:
- case ISD::PSEUDO_PROBE:
+ case ISD::PSEUDO_PROBE:
NodeToMatch->setNodeId(-1); // Mark selected.
return;
case ISD::AssertSext:
@@ -3227,12 +3227,12 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
case OPC_CheckImmAllOnesV:
- if (!ISD::isConstantSplatVectorAllOnes(N.getNode()))
- break;
+ if (!ISD::isConstantSplatVectorAllOnes(N.getNode()))
+ break;
continue;
case OPC_CheckImmAllZerosV:
- if (!ISD::isConstantSplatVectorAllZeros(N.getNode()))
- break;
+ if (!ISD::isConstantSplatVectorAllZeros(N.getNode()))
+ break;
continue;
case OPC_CheckFoldableChainNode: {
@@ -3537,7 +3537,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
auto &Chain = ChainNodesMatched;
assert((!E || !is_contained(Chain, N)) &&
"Chain node replaced during MorphNode");
- llvm::erase_value(Chain, N);
+ llvm::erase_value(Chain, N);
});
Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
Ops, EmitNodeInfo));
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 0172646c22..e0701ee581 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -64,20 +64,20 @@ cl::opt<bool> UseRegistersForDeoptValues(
"use-registers-for-deopt-values", cl::Hidden, cl::init(false),
cl::desc("Allow using registers for non pointer deopt args"));
-cl::opt<bool> UseRegistersForGCPointersInLandingPad(
- "use-registers-for-gc-values-in-landing-pad", cl::Hidden, cl::init(false),
- cl::desc("Allow using registers for gc pointer in landing pad"));
-
-cl::opt<unsigned> MaxRegistersForGCPointers(
- "max-registers-for-gc-values", cl::Hidden, cl::init(0),
- cl::desc("Max number of VRegs allowed to pass GC pointer meta args in"));
-
-cl::opt<bool> AlwaysSpillBase("statepoint-always-spill-base", cl::Hidden,
- cl::init(true),
- cl::desc("Force spilling of base GC pointers"));
-
-typedef FunctionLoweringInfo::StatepointRelocationRecord RecordType;
-
+cl::opt<bool> UseRegistersForGCPointersInLandingPad(
+ "use-registers-for-gc-values-in-landing-pad", cl::Hidden, cl::init(false),
+ cl::desc("Allow using registers for gc pointer in landing pad"));
+
+cl::opt<unsigned> MaxRegistersForGCPointers(
+ "max-registers-for-gc-values", cl::Hidden, cl::init(0),
+ cl::desc("Max number of VRegs allowed to pass GC pointer meta args in"));
+
+cl::opt<bool> AlwaysSpillBase("statepoint-always-spill-base", cl::Hidden,
+ cl::init(true),
+ cl::desc("Force spilling of base GC pointers"));
+
+typedef FunctionLoweringInfo::StatepointRelocationRecord RecordType;
+
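The three statepoint options moved above share one cl::opt shape. A minimal sketch with a hypothetical flag name, not one of the real statepoint flags:

#include "llvm/Support/CommandLine.h"

static llvm::cl::opt<unsigned>
    ToyMaxVRegs("toy-max-vregs", llvm::cl::Hidden, llvm::cl::init(0),
                llvm::cl::desc("Cap on values passed in virtual registers"));

// Read wherever the pass needs it; 0 keeps the feature disabled here.
unsigned toyVRegBudget() { return ToyMaxVRegs.getValue(); }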
static void pushStackMapConstant(SmallVectorImpl<SDValue>& Ops,
SelectionDAGBuilder &Builder, uint64_t Value) {
SDLoc L = Builder.getCurSDLoc();
@@ -167,18 +167,18 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
// Spill location is known for gc relocates
if (const auto *Relocate = dyn_cast<GCRelocateInst>(Val)) {
- const auto &RelocationMap =
- Builder.FuncInfo.StatepointRelocationMaps[Relocate->getStatepoint()];
-
- auto It = RelocationMap.find(Relocate->getDerivedPtr());
- if (It == RelocationMap.end())
- return None;
+ const auto &RelocationMap =
+ Builder.FuncInfo.StatepointRelocationMaps[Relocate->getStatepoint()];
- auto &Record = It->second;
- if (Record.type != RecordType::Spill)
+ auto It = RelocationMap.find(Relocate->getDerivedPtr());
+ if (It == RelocationMap.end())
return None;
- return Record.payload.FI;
+ auto &Record = It->second;
+ if (Record.type != RecordType::Spill)
+ return None;
+
+ return Record.payload.FI;
}
// Look through bitcast instructions.
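The reordered block above is a tagged-record lookup: find the derived pointer in the statepoint's relocation map, and only a Spill record yields a frame index. A hedged sketch with simplified stand-in types (the real map is keyed by the derived pointer Value*):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"

struct ToyRecord {
  enum Kind { Spill, VReg, NoRelocate } Type;
  int FI = -1; // only meaningful for Spill
};

llvm::Optional<int> findSpillSlot(const llvm::DenseMap<int, ToyRecord> &Map,
                                  int DerivedId) {
  auto It = Map.find(DerivedId);
  if (It == Map.end())
    return llvm::None; // not relocated by this statepoint
  if (It->second.Type != ToyRecord::Spill)
    return llvm::None; // relocated via vreg or not at all: no slot
  return It->second.FI;
}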
@@ -401,7 +401,7 @@ spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,
StoreMMO);
MMO = getMachineMemOperand(MF, *cast<FrameIndexSDNode>(Loc));
-
+
Builder.StatepointLowering.setLocation(Incoming, Loc);
}
@@ -498,10 +498,10 @@ lowerIncomingStatepointValue(SDValue Incoming, bool RequireSpillSlot,
/// will be set to the last value spilled (if any were).
static void
lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
- SmallVectorImpl<MachineMemOperand *> &MemRefs,
- SmallVectorImpl<SDValue> &GCPtrs,
- DenseMap<SDValue, int> &LowerAsVReg,
- SelectionDAGBuilder::StatepointLoweringInfo &SI,
+ SmallVectorImpl<MachineMemOperand *> &MemRefs,
+ SmallVectorImpl<SDValue> &GCPtrs,
+ DenseMap<SDValue, int> &LowerAsVReg,
+ SelectionDAGBuilder::StatepointLoweringInfo &SI,
SelectionDAGBuilder &Builder) {
// Lower the deopt and gc arguments for this statepoint. Layout will be:
// deopt argument length, deopt arguments.., gc arguments...
@@ -547,66 +547,66 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
const bool LiveInDeopt =
SI.StatepointFlags & (uint64_t)StatepointFlags::DeoptLiveIn;
- // Decide which deriver pointers will go on VRegs
- unsigned MaxVRegPtrs = MaxRegistersForGCPointers.getValue();
-
- // Pointers used on exceptional path of invoke statepoint.
- // We cannot assing them to VRegs.
- SmallSet<SDValue, 8> LPadPointers;
- if (!UseRegistersForGCPointersInLandingPad)
- if (auto *StInvoke = dyn_cast_or_null<InvokeInst>(SI.StatepointInstr)) {
- LandingPadInst *LPI = StInvoke->getLandingPadInst();
- for (auto *Relocate : SI.GCRelocates)
- if (Relocate->getOperand(0) == LPI) {
- LPadPointers.insert(Builder.getValue(Relocate->getBasePtr()));
- LPadPointers.insert(Builder.getValue(Relocate->getDerivedPtr()));
- }
- }
-
- LLVM_DEBUG(dbgs() << "Deciding how to lower GC Pointers:\n");
-
- // List of unique lowered GC Pointer values.
- SmallSetVector<SDValue, 16> LoweredGCPtrs;
- // Map lowered GC Pointer value to the index in above vector
- DenseMap<SDValue, unsigned> GCPtrIndexMap;
-
- unsigned CurNumVRegs = 0;
-
- auto canPassGCPtrOnVReg = [&](SDValue SD) {
- if (SD.getValueType().isVector())
- return false;
- if (LPadPointers.count(SD))
- return false;
- return !willLowerDirectly(SD);
- };
-
- auto processGCPtr = [&](const Value *V) {
- SDValue PtrSD = Builder.getValue(V);
- if (!LoweredGCPtrs.insert(PtrSD))
- return; // skip duplicates
- GCPtrIndexMap[PtrSD] = LoweredGCPtrs.size() - 1;
-
- assert(!LowerAsVReg.count(PtrSD) && "must not have been seen");
- if (LowerAsVReg.size() == MaxVRegPtrs)
- return;
- assert(V->getType()->isVectorTy() == PtrSD.getValueType().isVector() &&
- "IR and SD types disagree");
- if (!canPassGCPtrOnVReg(PtrSD)) {
- LLVM_DEBUG(dbgs() << "direct/spill "; PtrSD.dump(&Builder.DAG));
- return;
- }
- LLVM_DEBUG(dbgs() << "vreg "; PtrSD.dump(&Builder.DAG));
- LowerAsVReg[PtrSD] = CurNumVRegs++;
- };
-
- // Process derived pointers first to give them more chance to go on VReg.
- for (const Value *V : SI.Ptrs)
- processGCPtr(V);
- for (const Value *V : SI.Bases)
- processGCPtr(V);
-
- LLVM_DEBUG(dbgs() << LowerAsVReg.size() << " pointers will go in vregs\n");
-
+ // Decide which derived pointers will go on VRegs
+ unsigned MaxVRegPtrs = MaxRegistersForGCPointers.getValue();
+
+ // Pointers used on exceptional path of invoke statepoint.
+ // We cannot assign them to VRegs.
+ SmallSet<SDValue, 8> LPadPointers;
+ if (!UseRegistersForGCPointersInLandingPad)
+ if (auto *StInvoke = dyn_cast_or_null<InvokeInst>(SI.StatepointInstr)) {
+ LandingPadInst *LPI = StInvoke->getLandingPadInst();
+ for (auto *Relocate : SI.GCRelocates)
+ if (Relocate->getOperand(0) == LPI) {
+ LPadPointers.insert(Builder.getValue(Relocate->getBasePtr()));
+ LPadPointers.insert(Builder.getValue(Relocate->getDerivedPtr()));
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "Deciding how to lower GC Pointers:\n");
+
+ // List of unique lowered GC Pointer values.
+ SmallSetVector<SDValue, 16> LoweredGCPtrs;
+ // Map lowered GC Pointer value to the index in above vector
+ DenseMap<SDValue, unsigned> GCPtrIndexMap;
+
+ unsigned CurNumVRegs = 0;
+
+ auto canPassGCPtrOnVReg = [&](SDValue SD) {
+ if (SD.getValueType().isVector())
+ return false;
+ if (LPadPointers.count(SD))
+ return false;
+ return !willLowerDirectly(SD);
+ };
+
+ auto processGCPtr = [&](const Value *V) {
+ SDValue PtrSD = Builder.getValue(V);
+ if (!LoweredGCPtrs.insert(PtrSD))
+ return; // skip duplicates
+ GCPtrIndexMap[PtrSD] = LoweredGCPtrs.size() - 1;
+
+ assert(!LowerAsVReg.count(PtrSD) && "must not have been seen");
+ if (LowerAsVReg.size() == MaxVRegPtrs)
+ return;
+ assert(V->getType()->isVectorTy() == PtrSD.getValueType().isVector() &&
+ "IR and SD types disagree");
+ if (!canPassGCPtrOnVReg(PtrSD)) {
+ LLVM_DEBUG(dbgs() << "direct/spill "; PtrSD.dump(&Builder.DAG));
+ return;
+ }
+ LLVM_DEBUG(dbgs() << "vreg "; PtrSD.dump(&Builder.DAG));
+ LowerAsVReg[PtrSD] = CurNumVRegs++;
+ };
+
+ // Process derived pointers first to give them more chance to go on VReg.
+ for (const Value *V : SI.Ptrs)
+ processGCPtr(V);
+ for (const Value *V : SI.Bases)
+ processGCPtr(V);
+
+ LLVM_DEBUG(dbgs() << LowerAsVReg.size() << " pointers will go in vregs\n");
+
auto isGCValue = [&](const Value *V) {
auto *Ty = V->getType();
if (!Ty->isPtrOrPtrVectorTy())
@@ -618,9 +618,9 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
};
auto requireSpillSlot = [&](const Value *V) {
- if (isGCValue(V))
- return !LowerAsVReg.count(Builder.getValue(V));
- return !(LiveInDeopt || UseRegistersForDeoptValues);
+ if (isGCValue(V))
+ return !LowerAsVReg.count(Builder.getValue(V));
+ return !(LiveInDeopt || UseRegistersForDeoptValues);
};
// Before we actually start lowering (and allocating spill slots for values),
@@ -632,19 +632,19 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
if (requireSpillSlot(V))
reservePreviousStackSlotForValue(V, Builder);
}
-
- for (const Value *V : SI.Ptrs) {
- SDValue SDV = Builder.getValue(V);
- if (!LowerAsVReg.count(SDV))
- reservePreviousStackSlotForValue(V, Builder);
- }
-
- for (const Value *V : SI.Bases) {
- SDValue SDV = Builder.getValue(V);
- if (!LowerAsVReg.count(SDV))
- reservePreviousStackSlotForValue(V, Builder);
+
+ for (const Value *V : SI.Ptrs) {
+ SDValue SDV = Builder.getValue(V);
+ if (!LowerAsVReg.count(SDV))
+ reservePreviousStackSlotForValue(V, Builder);
}
+ for (const Value *V : SI.Bases) {
+ SDValue SDV = Builder.getValue(V);
+ if (!LowerAsVReg.count(SDV))
+ reservePreviousStackSlotForValue(V, Builder);
+ }
+
// First, prefix the list with the number of unique values to be
// lowered. Note that this is the number of *Values* not the
// number of SDValues required to lower them.
@@ -653,7 +653,7 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// The vm state arguments are lowered in an opaque manner. We do not know
// what type of values are contained within.
- LLVM_DEBUG(dbgs() << "Lowering deopt state\n");
+ LLVM_DEBUG(dbgs() << "Lowering deopt state\n");
for (const Value *V : SI.DeoptState) {
SDValue Incoming;
// If this is a function argument at a static frame index, generate it as
@@ -665,56 +665,56 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
}
if (!Incoming.getNode())
Incoming = Builder.getValue(V);
- LLVM_DEBUG(dbgs() << "Value " << *V
- << " requireSpillSlot = " << requireSpillSlot(V) << "\n");
+ LLVM_DEBUG(dbgs() << "Value " << *V
+ << " requireSpillSlot = " << requireSpillSlot(V) << "\n");
lowerIncomingStatepointValue(Incoming, requireSpillSlot(V), Ops, MemRefs,
Builder);
}
- // Finally, go ahead and lower all the gc arguments.
- pushStackMapConstant(Ops, Builder, LoweredGCPtrs.size());
- for (SDValue SDV : LoweredGCPtrs)
- lowerIncomingStatepointValue(SDV, !LowerAsVReg.count(SDV), Ops, MemRefs,
+ // Finally, go ahead and lower all the gc arguments.
+ pushStackMapConstant(Ops, Builder, LoweredGCPtrs.size());
+ for (SDValue SDV : LoweredGCPtrs)
+ lowerIncomingStatepointValue(SDV, !LowerAsVReg.count(SDV), Ops, MemRefs,
Builder);
- // Copy to out vector. LoweredGCPtrs will be empty after this point.
- GCPtrs = LoweredGCPtrs.takeVector();
+ // Copy to out vector. LoweredGCPtrs will be empty after this point.
+ GCPtrs = LoweredGCPtrs.takeVector();
// If there are any explicit spill slots passed to the statepoint, record
// them, but otherwise do not do anything special. These are user provided
// allocas and give control over placement to the consumer. In this case,
// it is the contents of the slot which may get updated, not the pointer to
// the alloca
- SmallVector<SDValue, 4> Allocas;
+ SmallVector<SDValue, 4> Allocas;
for (Value *V : SI.GCArgs) {
SDValue Incoming = Builder.getValue(V);
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
// This handles allocas as arguments to the statepoint
assert(Incoming.getValueType() == Builder.getFrameIndexTy() &&
"Incoming value is a frame index!");
- Allocas.push_back(Builder.DAG.getTargetFrameIndex(
- FI->getIndex(), Builder.getFrameIndexTy()));
+ Allocas.push_back(Builder.DAG.getTargetFrameIndex(
+ FI->getIndex(), Builder.getFrameIndexTy()));
auto &MF = Builder.DAG.getMachineFunction();
auto *MMO = getMachineMemOperand(MF, *FI);
MemRefs.push_back(MMO);
}
}
- pushStackMapConstant(Ops, Builder, Allocas.size());
- Ops.append(Allocas.begin(), Allocas.end());
-
- // Now construct GC base/derived map;
- pushStackMapConstant(Ops, Builder, SI.Ptrs.size());
- SDLoc L = Builder.getCurSDLoc();
- for (unsigned i = 0; i < SI.Ptrs.size(); ++i) {
- SDValue Base = Builder.getValue(SI.Bases[i]);
- assert(GCPtrIndexMap.count(Base) && "base not found in index map");
- Ops.push_back(
- Builder.DAG.getTargetConstant(GCPtrIndexMap[Base], L, MVT::i64));
- SDValue Derived = Builder.getValue(SI.Ptrs[i]);
- assert(GCPtrIndexMap.count(Derived) && "derived not found in index map");
- Ops.push_back(
- Builder.DAG.getTargetConstant(GCPtrIndexMap[Derived], L, MVT::i64));
+ pushStackMapConstant(Ops, Builder, Allocas.size());
+ Ops.append(Allocas.begin(), Allocas.end());
+
+ // Now construct the GC base/derived map.
+ pushStackMapConstant(Ops, Builder, SI.Ptrs.size());
+ SDLoc L = Builder.getCurSDLoc();
+ for (unsigned i = 0; i < SI.Ptrs.size(); ++i) {
+ SDValue Base = Builder.getValue(SI.Bases[i]);
+ assert(GCPtrIndexMap.count(Base) && "base not found in index map");
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(GCPtrIndexMap[Base], L, MVT::i64));
+ SDValue Derived = Builder.getValue(SI.Ptrs[i]);
+ assert(GCPtrIndexMap.count(Derived) && "derived not found in index map");
+ Ops.push_back(
+ Builder.DAG.getTargetConstant(GCPtrIndexMap[Derived], L, MVT::i64));
}
}
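processGCPtr above is a budget policy: derived pointers are offered virtual registers before base pointers, duplicates are skipped, and sequential result indices are handed out until MaxVRegPtrs is reached. A toy reduction of just that bookkeeping, leaving out the willLowerDirectly and landing-pad filters:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"

llvm::DenseMap<int, unsigned> assignVRegBudget(llvm::ArrayRef<int> Derived,
                                               llvm::ArrayRef<int> Bases,
                                               unsigned MaxVRegs) {
  llvm::DenseMap<int, unsigned> LowerAsVReg;
  auto Process = [&](int Ptr) {
    if (LowerAsVReg.count(Ptr) || LowerAsVReg.size() == MaxVRegs)
      return; // duplicate, or budget exhausted
    unsigned Idx = LowerAsVReg.size();
    LowerAsVReg.try_emplace(Ptr, Idx); // next free statepoint result index
  };
  for (int P : Derived) // derived pointers get first claim on the budget
    Process(P);
  for (int P : Bases)
    Process(P);
  return LowerAsVReg;
}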
@@ -730,7 +730,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
assert(SI.Bases.size() == SI.Ptrs.size() &&
SI.Ptrs.size() <= SI.GCRelocates.size());
- LLVM_DEBUG(dbgs() << "Lowering statepoint " << *SI.StatepointInstr << "\n");
+ LLVM_DEBUG(dbgs() << "Lowering statepoint " << *SI.StatepointInstr << "\n");
#ifndef NDEBUG
for (auto *Reloc : SI.GCRelocates)
if (Reloc->getParent() == SI.StatepointInstr->getParent())
@@ -738,16 +738,16 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
#endif
// Lower statepoint vmstate and gcstate arguments
-
- // All lowered meta args.
+
+ // All lowered meta args.
SmallVector<SDValue, 10> LoweredMetaArgs;
- // Lowered GC pointers (subset of above).
- SmallVector<SDValue, 16> LoweredGCArgs;
+ // Lowered GC pointers (subset of above).
+ SmallVector<SDValue, 16> LoweredGCArgs;
SmallVector<MachineMemOperand*, 16> MemRefs;
- // Maps derived pointer SDValue to statepoint result of relocated pointer.
- DenseMap<SDValue, int> LowerAsVReg;
- lowerStatepointMetaArgs(LoweredMetaArgs, MemRefs, LoweredGCArgs, LowerAsVReg,
- SI, *this);
+ // Maps derived pointer SDValue to statepoint result of relocated pointer.
+ DenseMap<SDValue, int> LowerAsVReg;
+ lowerStatepointMetaArgs(LoweredMetaArgs, MemRefs, LoweredGCArgs, LowerAsVReg,
+ SI, *this);
// Now that we've emitted the spills, we need to update the root so that the
// call sequence is ordered correctly.
@@ -847,7 +847,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
pushStackMapConstant(Ops, *this, Flags);
// Insert all vmstate and gcstate arguments
- llvm::append_range(Ops, LoweredMetaArgs);
+ llvm::append_range(Ops, LoweredMetaArgs);
// Add register mask from call node
Ops.push_back(*RegMaskIt);
@@ -861,79 +861,79 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
// Compute return values. Provide a glue output since we consume one as
// input. This allows someone else to chain off us as needed.
- SmallVector<EVT, 8> NodeTys;
- for (auto SD : LoweredGCArgs) {
- if (!LowerAsVReg.count(SD))
- continue;
- NodeTys.push_back(SD.getValueType());
- }
- LLVM_DEBUG(dbgs() << "Statepoint has " << NodeTys.size() << " results\n");
- assert(NodeTys.size() == LowerAsVReg.size() && "Inconsistent GC Ptr lowering");
- NodeTys.push_back(MVT::Other);
- NodeTys.push_back(MVT::Glue);
-
- unsigned NumResults = NodeTys.size();
+ SmallVector<EVT, 8> NodeTys;
+ for (auto SD : LoweredGCArgs) {
+ if (!LowerAsVReg.count(SD))
+ continue;
+ NodeTys.push_back(SD.getValueType());
+ }
+ LLVM_DEBUG(dbgs() << "Statepoint has " << NodeTys.size() << " results\n");
+ assert(NodeTys.size() == LowerAsVReg.size() && "Inconsistent GC Ptr lowering");
+ NodeTys.push_back(MVT::Other);
+ NodeTys.push_back(MVT::Glue);
+
+ unsigned NumResults = NodeTys.size();
MachineSDNode *StatepointMCNode =
DAG.getMachineNode(TargetOpcode::STATEPOINT, getCurSDLoc(), NodeTys, Ops);
DAG.setNodeMemRefs(StatepointMCNode, MemRefs);
- // For values lowered to tied-defs, create the virtual registers. Note that
- // for simplicity, we *always* create a vreg even within a single block.
- DenseMap<SDValue, Register> VirtRegs;
- for (const auto *Relocate : SI.GCRelocates) {
- Value *Derived = Relocate->getDerivedPtr();
- SDValue SD = getValue(Derived);
- if (!LowerAsVReg.count(SD))
- continue;
-
- // Handle multiple gc.relocates of the same input efficiently.
- if (VirtRegs.count(SD))
- continue;
-
- SDValue Relocated = SDValue(StatepointMCNode, LowerAsVReg[SD]);
-
- auto *RetTy = Relocate->getType();
- Register Reg = FuncInfo.CreateRegs(RetTy);
- RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), Reg, RetTy, None);
- SDValue Chain = DAG.getRoot();
- RFV.getCopyToRegs(Relocated, DAG, getCurSDLoc(), Chain, nullptr);
- PendingExports.push_back(Chain);
-
- VirtRegs[SD] = Reg;
- }
-
- // Record for later use how each relocation was lowered. This is needed to
- // allow later gc.relocates to mirror the lowering chosen.
- const Instruction *StatepointInstr = SI.StatepointInstr;
- auto &RelocationMap = FuncInfo.StatepointRelocationMaps[StatepointInstr];
- for (const GCRelocateInst *Relocate : SI.GCRelocates) {
- const Value *V = Relocate->getDerivedPtr();
- SDValue SDV = getValue(V);
- SDValue Loc = StatepointLowering.getLocation(SDV);
-
- RecordType Record;
- if (LowerAsVReg.count(SDV)) {
- Record.type = RecordType::VReg;
- assert(VirtRegs.count(SDV));
- Record.payload.Reg = VirtRegs[SDV];
- } else if (Loc.getNode()) {
- Record.type = RecordType::Spill;
- Record.payload.FI = cast<FrameIndexSDNode>(Loc)->getIndex();
- } else {
- Record.type = RecordType::NoRelocate;
- // If we didn't relocate a value, we'll essentialy end up inserting an
- // additional use of the original value when lowering the gc.relocate.
- // We need to make sure the value is available at the new use, which
- // might be in another block.
- if (Relocate->getParent() != StatepointInstr->getParent())
- ExportFromCurrentBlock(V);
- }
- RelocationMap[V] = Record;
- }
-
-
-
+ // For values lowered to tied-defs, create the virtual registers. Note that
+ // for simplicity, we *always* create a vreg even within a single block.
+ DenseMap<SDValue, Register> VirtRegs;
+ for (const auto *Relocate : SI.GCRelocates) {
+ Value *Derived = Relocate->getDerivedPtr();
+ SDValue SD = getValue(Derived);
+ if (!LowerAsVReg.count(SD))
+ continue;
+
+ // Handle multiple gc.relocates of the same input efficiently.
+ if (VirtRegs.count(SD))
+ continue;
+
+ SDValue Relocated = SDValue(StatepointMCNode, LowerAsVReg[SD]);
+
+ auto *RetTy = Relocate->getType();
+ Register Reg = FuncInfo.CreateRegs(RetTy);
+ RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
+ DAG.getDataLayout(), Reg, RetTy, None);
+ SDValue Chain = DAG.getRoot();
+ RFV.getCopyToRegs(Relocated, DAG, getCurSDLoc(), Chain, nullptr);
+ PendingExports.push_back(Chain);
+
+ VirtRegs[SD] = Reg;
+ }
+
+ // Record for later use how each relocation was lowered. This is needed to
+ // allow later gc.relocates to mirror the lowering chosen.
+ const Instruction *StatepointInstr = SI.StatepointInstr;
+ auto &RelocationMap = FuncInfo.StatepointRelocationMaps[StatepointInstr];
+ for (const GCRelocateInst *Relocate : SI.GCRelocates) {
+ const Value *V = Relocate->getDerivedPtr();
+ SDValue SDV = getValue(V);
+ SDValue Loc = StatepointLowering.getLocation(SDV);
+
+ RecordType Record;
+ if (LowerAsVReg.count(SDV)) {
+ Record.type = RecordType::VReg;
+ assert(VirtRegs.count(SDV));
+ Record.payload.Reg = VirtRegs[SDV];
+ } else if (Loc.getNode()) {
+ Record.type = RecordType::Spill;
+ Record.payload.FI = cast<FrameIndexSDNode>(Loc)->getIndex();
+ } else {
+ Record.type = RecordType::NoRelocate;
+ // If we didn't relocate a value, we'll essentially end up inserting an
+ // additional use of the original value when lowering the gc.relocate.
+ // We need to make sure the value is available at the new use, which
+ // might be in another block.
+ if (Relocate->getParent() != StatepointInstr->getParent())
+ ExportFromCurrentBlock(V);
+ }
+ RelocationMap[V] = Record;
+ }
+
+
+
SDNode *SinkNode = StatepointMCNode;
// Build the GC_TRANSITION_END node if necessary.
@@ -944,7 +944,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
SmallVector<SDValue, 8> TEOps;
// Add chain
- TEOps.push_back(SDValue(StatepointMCNode, NumResults - 2));
+ TEOps.push_back(SDValue(StatepointMCNode, NumResults - 2));
// Add GC transition arguments
for (const Value *V : SI.GCTransitionArgs) {
@@ -954,7 +954,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
}
// Add glue
- TEOps.push_back(SDValue(StatepointMCNode, NumResults - 1));
+ TEOps.push_back(SDValue(StatepointMCNode, NumResults - 1));
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
@@ -965,18 +965,18 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
}
// Replace original call
- // Call: ch,glue = CALL ...
- // Statepoint: [gc relocates],ch,glue = STATEPOINT ...
- unsigned NumSinkValues = SinkNode->getNumValues();
- SDValue StatepointValues[2] = {SDValue(SinkNode, NumSinkValues - 2),
- SDValue(SinkNode, NumSinkValues - 1)};
- DAG.ReplaceAllUsesWith(CallNode, StatepointValues);
+ // Call: ch,glue = CALL ...
+ // Statepoint: [gc relocates],ch,glue = STATEPOINT ...
+ unsigned NumSinkValues = SinkNode->getNumValues();
+ SDValue StatepointValues[2] = {SDValue(SinkNode, NumSinkValues - 2),
+ SDValue(SinkNode, NumSinkValues - 1)};
+ DAG.ReplaceAllUsesWith(CallNode, StatepointValues);
// Remove original call node
DAG.DeleteNode(CallNode);
- // Since we always emit CopyToRegs (even for local relocates), we must
- // update root, so that they are emitted before any local uses.
- (void)getControlRoot();
+ // Since we always emit CopyToRegs (even for local relocates), we must
+ // update root, so that they are emitted before any local uses.
+ (void)getControlRoot();
// TODO: A better future implementation would be to emit a single variable
// argument, variable return value STATEPOINT node here and then hookup the
@@ -1073,7 +1073,7 @@ SelectionDAGBuilder::LowerStatepoint(const GCStatepointInst &I,
setValue(&I, ReturnValue);
return;
}
-
+
// Result value will be used in a different basic block so we need to export
// it now. Default exporting mechanism will not work here because statepoint
// call has a different type than the actual call. It means that by default
@@ -1170,28 +1170,28 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
#endif
const Value *DerivedPtr = Relocate.getDerivedPtr();
- auto &RelocationMap =
- FuncInfo.StatepointRelocationMaps[Relocate.getStatepoint()];
- auto SlotIt = RelocationMap.find(DerivedPtr);
- assert(SlotIt != RelocationMap.end() && "Relocating not lowered gc value");
- const RecordType &Record = SlotIt->second;
-
- // If relocation was done via virtual register..
- if (Record.type == RecordType::VReg) {
- Register InReg = Record.payload.Reg;
- RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), InReg, Relocate.getType(),
- None); // This is not an ABI copy.
- // We generate copy to/from regs even for local uses, hence we must
- // chain with current root to ensure proper ordering of copies w.r.t.
- // statepoint.
- SDValue Chain = DAG.getRoot();
- SDValue Relocation = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
- Chain, nullptr, nullptr);
- setValue(&Relocate, Relocation);
- return;
- }
-
+ auto &RelocationMap =
+ FuncInfo.StatepointRelocationMaps[Relocate.getStatepoint()];
+ auto SlotIt = RelocationMap.find(DerivedPtr);
+ assert(SlotIt != RelocationMap.end() && "Relocating not lowered gc value");
+ const RecordType &Record = SlotIt->second;
+
+ // If relocation was done via virtual register..
+ if (Record.type == RecordType::VReg) {
+ Register InReg = Record.payload.Reg;
+ RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
+ DAG.getDataLayout(), InReg, Relocate.getType(),
+ None); // This is not an ABI copy.
+ // We generate copy to/from regs even for local uses, hence we must
+ // chain with current root to ensure proper ordering of copies w.r.t.
+ // statepoint.
+ SDValue Chain = DAG.getRoot();
+ SDValue Relocation = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
+ Chain, nullptr, nullptr);
+ setValue(&Relocate, Relocation);
+ return;
+ }
+
SDValue SD = getValue(DerivedPtr);
if (SD.isUndef() && SD.getValueType().getSizeInBits() <= 64) {
@@ -1204,14 +1204,14 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
// We didn't need to spill these special cases (constants and allocas).
// See the handling in spillIncomingValueForStatepoint for detail.
- if (Record.type == RecordType::NoRelocate) {
+ if (Record.type == RecordType::NoRelocate) {
setValue(&Relocate, SD);
return;
}
- assert(Record.type == RecordType::Spill);
-
- unsigned Index = Record.payload.FI;;
+ assert(Record.type == RecordType::Spill);
+
+ unsigned Index = Record.payload.FI;
SDValue SpillSlot = DAG.getTargetFrameIndex(Index, getFrameIndexTy());
// All the reloads are independent and are reading memory only modified by
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index b0ad86899d..2b0bf413c0 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -93,7 +93,7 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
SDValue Value = OutVals[I];
if (Value->getOpcode() != ISD::CopyFromReg)
return false;
- Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
+ Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
if (MRI.getLiveInPhysReg(ArgReg) != Reg)
return false;
}
@@ -250,7 +250,7 @@ bool TargetLowering::findOptimalMemOpLowering(
bool Fast;
if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
allowsMisalignedMemoryAccesses(
- VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
+ VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
MachineMemOperand::MONone, &Fast) &&
Fast)
VTSize = Size;
@@ -912,17 +912,17 @@ bool TargetLowering::SimplifyDemandedBits(
if (Op.getOpcode() == ISD::Constant) {
// We know all of the bits for a constant!
- Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
- return false;
- }
-
- if (Op.getOpcode() == ISD::ConstantFP) {
- // We know all of the bits for a floating point constant!
- Known = KnownBits::makeConstant(
- cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
+ Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
return false;
}
+ if (Op.getOpcode() == ISD::ConstantFP) {
+ // We know all of the bits for a floating point constant!
+ Known = KnownBits::makeConstant(
+ cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
+ return false;
+ }
+
// Other users may use these bits.
EVT VT = Op.getValueType();
if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
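A concrete instance of the ConstantFP case above: a floating-point constant has fully known bits once bitcast to an integer. Sketch only; 1.0f bitcasts to 0x3F800000:

#include "llvm/ADT/APFloat.h"
#include "llvm/Support/KnownBits.h"

llvm::KnownBits knownBitsOfFloatOne() {
  llvm::APFloat One(1.0f);
  // All 32 bits become known: Known.One == 0x3F800000 and Known.Zero is
  // its complement.
  return llvm::KnownBits::makeConstant(One.bitcastToAPInt());
}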
@@ -1015,8 +1015,8 @@ bool TargetLowering::SimplifyDemandedBits(
Depth + 1))
return true;
- if (!!DemandedVecElts)
- Known = KnownBits::commonBits(Known, KnownVec);
+ if (!!DemandedVecElts)
+ Known = KnownBits::commonBits(Known, KnownVec);
return false;
}
@@ -1041,10 +1041,10 @@ bool TargetLowering::SimplifyDemandedBits(
Known.Zero.setAllBits();
Known.One.setAllBits();
- if (!!DemandedSubElts)
- Known = KnownBits::commonBits(Known, KnownSub);
- if (!!DemandedSrcElts)
- Known = KnownBits::commonBits(Known, KnownSrc);
+ if (!!DemandedSubElts)
+ Known = KnownBits::commonBits(Known, KnownSub);
+ if (!!DemandedSrcElts)
+ Known = KnownBits::commonBits(Known, KnownSrc);
// Attempt to avoid multi-use src if we don't need anything from it.
if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
@@ -1101,8 +1101,8 @@ bool TargetLowering::SimplifyDemandedBits(
Known2, TLO, Depth + 1))
return true;
// Known bits are shared by every demanded subvector element.
- if (!!DemandedSubElts)
- Known = KnownBits::commonBits(Known, Known2);
+ if (!!DemandedSubElts)
+ Known = KnownBits::commonBits(Known, Known2);
}
break;
}
@@ -1140,13 +1140,13 @@ bool TargetLowering::SimplifyDemandedBits(
if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
Depth + 1))
return true;
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
if (!!DemandedRHS) {
if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
Depth + 1))
return true;
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
}
// Attempt to avoid multi-use ops if we don't need anything from them.
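Both branches above merge through KnownBits::commonBits, which keeps only the bit facts the two sources agree on. A hedged one-function sketch of that intersection:

#include "llvm/Support/KnownBits.h"

// If a value may come from either A or B, a bit stays known only when both
// sides know it the same way: Zero &= Zero, One &= One.
llvm::KnownBits mergeEitherSource(const llvm::KnownBits &A,
                                  const llvm::KnownBits &B) {
  return llvm::KnownBits::commonBits(A, B);
}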
@@ -1321,15 +1321,15 @@ bool TargetLowering::SimplifyDemandedBits(
return true;
// If all of the unknown bits are known to be zero on one side or the other
- // turn this into an *inclusive* or.
+ // turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));
ConstantSDNode* C = isConstOrConstSplat(Op1, DemandedElts);
if (C) {
- // If one side is a constant, and all of the set bits in the constant are
- // also known set on the other side, turn this into an AND, as we know
+ // If one side is a constant, and all of the set bits in the constant are
+ // also known set on the other side, turn this into an AND, as we know
// the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
// NB: it is okay if more bits are known than are requested
@@ -1373,7 +1373,7 @@ bool TargetLowering::SimplifyDemandedBits(
return true;
// Only known if known in both the LHS and RHS.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
break;
case ISD::SELECT_CC:
if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
@@ -1390,7 +1390,7 @@ bool TargetLowering::SimplifyDemandedBits(
return true;
// Only known if known in both the LHS and RHS.
- Known = KnownBits::commonBits(Known, Known2);
+ Known = KnownBits::commonBits(Known, Known2);
break;
case ISD::SETCC: {
SDValue Op0 = Op.getOperand(0);
@@ -1722,32 +1722,32 @@ bool TargetLowering::SimplifyDemandedBits(
}
break;
}
- case ISD::UMIN: {
- // Check if one arg is always less than (or equal) to the other arg.
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
- KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
- Known = KnownBits::umin(Known0, Known1);
- if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
- return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
- if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
- return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
- break;
- }
- case ISD::UMAX: {
- // Check if one arg is always greater than (or equal) to the other arg.
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
- KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
- Known = KnownBits::umax(Known0, Known1);
- if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1))
- return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
- if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1))
- return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
- break;
- }
+ case ISD::UMIN: {
+ // Check if one arg is always less than (or equal) to the other arg.
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
+ KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
+ Known = KnownBits::umin(Known0, Known1);
+ if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1))
+ return TLO.CombineTo(Op, IsULE.getValue() ? Op0 : Op1);
+ if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1))
+ return TLO.CombineTo(Op, IsULT.getValue() ? Op0 : Op1);
+ break;
+ }
+ case ISD::UMAX: {
+ // Check if one arg is always greater than (or equal) to the other arg.
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
+ KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
+ Known = KnownBits::umax(Known0, Known1);
+ if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1))
+ return TLO.CombineTo(Op, IsUGE.getValue() ? Op0 : Op1);
+ if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1))
+ return TLO.CombineTo(Op, IsUGT.getValue() ? Op0 : Op1);
+ break;
+ }
case ISD::BITREVERSE: {
SDValue Src = Op.getOperand(0);
APInt DemandedSrcBits = DemandedBits.reverseBits();
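The UMIN/UMAX cases in the hunk above fold the node away whenever known bits alone decide the comparison. A hedged sketch of the UMIN side, using the same KnownBits::ule the hunk calls:

#include "llvm/ADT/Optional.h"
#include "llvm/Support/KnownBits.h"

// True when the known bits prove x <= y for every possible pair of values,
// i.e. umin(x, y) can be replaced by x outright. KnownBits::ule returns
// None when the two ranges overlap and nothing can be concluded.
bool uminIsAlwaysLHS(const llvm::KnownBits &X, const llvm::KnownBits &Y) {
  if (llvm::Optional<bool> ULE = llvm::KnownBits::ule(X, Y))
    return *ULE;
  return false; // unknown: keep the umin node
}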
@@ -1768,17 +1768,17 @@ bool TargetLowering::SimplifyDemandedBits(
Known.Zero = Known2.Zero.byteSwap();
break;
}
- case ISD::CTPOP: {
- // If only 1 bit is demanded, replace with PARITY as long as we're before
- // op legalization.
- // FIXME: Limit to scalars for now.
- if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
- Op.getOperand(0)));
-
- Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
- break;
- }
+ case ISD::CTPOP: {
+ // If only 1 bit is demanded, replace with PARITY as long as we're before
+ // op legalization.
+ // FIXME: Limit to scalars for now.
+ if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
+ Op.getOperand(0)));
+
+ Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
+ break;
+ }
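The CTPOP case rests on the identity that bit 0 of a population count is the parity of the input, so demanding a single bit lets the whole popcount become a PARITY node. A quick standalone check of that identity (using GCC/Clang builtins):

#include <cassert>
#include <cstdint>

int main() {
  // Bit 0 of ctpop(x) equals parity(x) for every x.
  for (uint32_t x = 0; x < 1u << 20; ++x)
    assert((__builtin_popcount(x) & 1) == __builtin_parity(x));
}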
case ISD::SIGN_EXTEND_INREG: {
SDValue Op0 = Op.getOperand(0);
EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
@@ -1889,11 +1889,11 @@ bool TargetLowering::SimplifyDemandedBits(
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
assert(Known.getBitWidth() == InBits && "Src width has changed?");
Known = Known.zext(BitWidth);
-
- // Attempt to avoid multi-use ops if we don't need anything from them.
- if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
- Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
- return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
+
+ // Attempt to avoid multi-use ops if we don't need anything from them.
+ if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
+ Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
+ return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
break;
}
case ISD::SIGN_EXTEND:
@@ -1942,11 +1942,11 @@ bool TargetLowering::SimplifyDemandedBits(
if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
}
-
- // Attempt to avoid multi-use ops if we don't need anything from them.
- if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
- Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
- return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
+
+ // Attempt to avoid multi-use ops if we don't need anything from them.
+ if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
+ Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1))
+ return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc));
break;
}
case ISD::ANY_EXTEND:
@@ -1986,8 +1986,8 @@ bool TargetLowering::SimplifyDemandedBits(
// zero/one bits live out.
unsigned OperandBitWidth = Src.getScalarValueSizeInBits();
APInt TruncMask = DemandedBits.zext(OperandBitWidth);
- if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
- Depth + 1))
+ if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO,
+ Depth + 1))
return true;
Known = Known.trunc(BitWidth);
@@ -2010,9 +2010,9 @@ bool TargetLowering::SimplifyDemandedBits(
// undesirable.
break;
- const APInt *ShAmtC =
- TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts);
- if (!ShAmtC || ShAmtC->uge(BitWidth))
+ const APInt *ShAmtC =
+ TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts);
+ if (!ShAmtC || ShAmtC->uge(BitWidth))
break;
uint64_t ShVal = ShAmtC->getZExtValue();
@@ -2024,12 +2024,12 @@ bool TargetLowering::SimplifyDemandedBits(
if (!(HighBits & DemandedBits)) {
// None of the shifted-in bits are needed. Add a truncate of the
// shift input, then shift it.
- SDValue NewShAmt = TLO.DAG.getConstant(
- ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes()));
+ SDValue NewShAmt = TLO.DAG.getConstant(
+ ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes()));
SDValue NewTrunc =
TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0));
return TLO.CombineTo(
- Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt));
+ Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt));
}
break;
}
@@ -2054,14 +2054,14 @@ bool TargetLowering::SimplifyDemandedBits(
case ISD::EXTRACT_VECTOR_ELT: {
SDValue Src = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
- ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
+ ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount();
unsigned EltBitWidth = Src.getScalarValueSizeInBits();
- if (SrcEltCnt.isScalable())
- return false;
-
+ if (SrcEltCnt.isScalable())
+ return false;
+
// Demand the bits from every vector element without a constant index.
- unsigned NumSrcElts = SrcEltCnt.getFixedValue();
+ unsigned NumSrcElts = SrcEltCnt.getFixedValue();
APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx))
if (CIdx->getAPIntValue().ult(NumSrcElts))
@@ -2277,11 +2277,11 @@ bool TargetLowering::SimplifyDemandedBits(
}
if (VT.isInteger())
return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT));
- if (VT.isFloatingPoint())
- return TLO.CombineTo(
- Op,
- TLO.DAG.getConstantFP(
- APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT));
+ if (VT.isFloatingPoint())
+ return TLO.CombineTo(
+ Op,
+ TLO.DAG.getConstantFP(
+ APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT));
}
return false;
@@ -2643,9 +2643,9 @@ bool TargetLowering::SimplifyDemandedVectorElts(
KnownZero, TLO, Depth + 1))
return true;
- KnownUndef.setBitVal(Idx, Scl.isUndef());
+ KnownUndef.setBitVal(Idx, Scl.isUndef());
- KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
+ KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
break;
}
@@ -3393,74 +3393,74 @@ SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
return DAG.getSetCC(DL, VT, X, YShl1, Cond);
}
-static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
- SDValue N0, const APInt &C1,
- ISD::CondCode Cond, const SDLoc &dl,
- SelectionDAG &DAG) {
- // Look through truncs that don't change the value of a ctpop.
- // FIXME: Add vector support? Need to be careful with setcc result type below.
- SDValue CTPOP = N0;
- if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() &&
- N0.getScalarValueSizeInBits() >
- Log2_32(N0.getOperand(0).getScalarValueSizeInBits()))
- CTPOP = N0.getOperand(0);
-
- if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())
- return SDValue();
-
- EVT CTVT = CTPOP.getValueType();
- SDValue CTOp = CTPOP.getOperand(0);
-
- // If this is a vector CTPOP, keep the CTPOP if it is legal.
- // TODO: Should we check if CTPOP is legal (or custom) for scalars?
- if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT))
- return SDValue();
-
- // (ctpop x) u< 2 -> (x & x-1) == 0
- // (ctpop x) u> 1 -> (x & x-1) != 0
- if (Cond == ISD::SETULT || Cond == ISD::SETUGT) {
- unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond);
- if (C1.ugt(CostLimit + (Cond == ISD::SETULT)))
- return SDValue();
- if (C1 == 0 && (Cond == ISD::SETULT))
- return SDValue(); // This is handled elsewhere.
-
- unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT);
-
- SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
- SDValue Result = CTOp;
- for (unsigned i = 0; i < Passes; i++) {
- SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne);
- Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add);
- }
- ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
- return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC);
- }
-
- // If ctpop is not supported, expand a power-of-2 comparison based on it.
- if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) {
- // For scalars, keep CTPOP if it is legal or custom.
- if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT))
- return SDValue();
- // This is based on X86's custom lowering for CTPOP, which produces more
- // instructions than the expansion here.
-
- // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
- // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
- SDValue Zero = DAG.getConstant(0, dl, CTVT);
- SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
- assert(CTVT.isInteger());
- ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
- SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
- SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
- SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
- SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
- unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
- return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
- }
-
- return SDValue();
-}
-
+static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
+ SDValue N0, const APInt &C1,
+ ISD::CondCode Cond, const SDLoc &dl,
+ SelectionDAG &DAG) {
+ // Look through truncs that don't change the value of a ctpop.
+ // FIXME: Add vector support? Need to be careful with setcc result type below.
+ SDValue CTPOP = N0;
+ if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() &&
+ N0.getScalarValueSizeInBits() >
+ Log2_32(N0.getOperand(0).getScalarValueSizeInBits()))
+ CTPOP = N0.getOperand(0);
+
+ if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())
+ return SDValue();
+
+ EVT CTVT = CTPOP.getValueType();
+ SDValue CTOp = CTPOP.getOperand(0);
+
+ // If this is a vector CTPOP, keep the CTPOP if it is legal.
+ // TODO: Should we check if CTPOP is legal (or custom) for scalars?
+ if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT))
+ return SDValue();
+
+ // (ctpop x) u< 2 -> (x & x-1) == 0
+ // (ctpop x) u> 1 -> (x & x-1) != 0
+ if (Cond == ISD::SETULT || Cond == ISD::SETUGT) {
+ unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond);
+ if (C1.ugt(CostLimit + (Cond == ISD::SETULT)))
+ return SDValue();
+ if (C1 == 0 && (Cond == ISD::SETULT))
+ return SDValue(); // This is handled elsewhere.
+
+ unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT);
+
+ SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
+ SDValue Result = CTOp;
+ for (unsigned i = 0; i < Passes; i++) {
+ SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne);
+ Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add);
+ }
+ ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
+ return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC);
+ }
+
+ // If ctpop is not supported, expand a power-of-2 comparison based on it.
+ if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) {
+ // For scalars, keep CTPOP if it is legal or custom.
+ if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT))
+ return SDValue();
+ // This is based on X86's custom lowering for CTPOP, which produces more
+ // instructions than the expansion here.
+
+ // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
+ // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
+ SDValue Zero = DAG.getConstant(0, dl, CTVT);
+ SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
+ assert(CTVT.isInteger());
+ ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
+ SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
+ SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
+ SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
+ SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
+ unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR;
+ return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS);
+ }
+
+ return SDValue();
+}
+
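Both rewrites in simplifySetCCWithCTPOP rely on x & (x - 1) clearing the lowest set bit: one pass leaves zero exactly when x had at most one bit set. A standalone sanity check of the folds as the comments state them:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x < 1u << 20; ++x) {
    bool onePass = (x & (x - 1)) == 0; // lowest set bit cleared once
    // (ctpop x) u< 2 -> (x & x-1) == 0
    assert((__builtin_popcount(x) < 2) == onePass);
    // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
    assert((__builtin_popcount(x) == 1) == (x != 0 && onePass));
  }
}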
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
@@ -3477,11 +3477,11 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// Ensure that the constant occurs on the RHS and fold constant comparisons.
// TODO: Handle non-splat vector constants. An all-undef constant causes trouble.
- // FIXME: We can't yet fold constant scalable vector splats, so avoid an
- // infinite loop here when we encounter one.
+ // FIXME: We can't yet fold constant scalable vector splats, so avoid an
+ // infinite loop here when we encounter one.
ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
if (isConstOrConstSplat(N0) &&
- (!OpVT.isScalableVector() || !isConstOrConstSplat(N1)) &&
+ (!OpVT.isScalableVector() || !isConstOrConstSplat(N1)) &&
(DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(SwappedCC, N0.getSimpleValueType())))
return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
@@ -3493,46 +3493,46 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) &&
(DCI.isBeforeLegalizeOps() ||
isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) &&
- DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) &&
- !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1}))
+ DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) &&
+ !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1}))
return DAG.getSetCC(dl, VT, N1, N0, SwappedCC);
- if (auto *N1C = isConstOrConstSplat(N1)) {
+ if (auto *N1C = isConstOrConstSplat(N1)) {
const APInt &C1 = N1C->getAPIntValue();
- // Optimize some CTPOP cases.
- if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG))
- return V;
-
+ // Optimize some CTPOP cases.
+ if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG))
+ return V;
+
// If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
// equality comparison, then we're just comparing whether X itself is
// zero.
if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
N0.getOperand(0).getOpcode() == ISD::CTLZ &&
- isPowerOf2_32(N0.getScalarValueSizeInBits())) {
- if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) {
- if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) {
- if ((C1 == 0) == (Cond == ISD::SETEQ)) {
- // (srl (ctlz x), 5) == 0 -> X != 0
- // (srl (ctlz x), 5) != 1 -> X != 0
- Cond = ISD::SETNE;
- } else {
- // (srl (ctlz x), 5) != 0 -> X == 0
- // (srl (ctlz x), 5) == 1 -> X == 0
- Cond = ISD::SETEQ;
- }
- SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
- return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero,
- Cond);
+ isPowerOf2_32(N0.getScalarValueSizeInBits())) {
+ if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) {
+ if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) {
+ if ((C1 == 0) == (Cond == ISD::SETEQ)) {
+ // (srl (ctlz x), 5) == 0 -> X != 0
+ // (srl (ctlz x), 5) != 1 -> X != 0
+ Cond = ISD::SETNE;
+ } else {
+ // (srl (ctlz x), 5) != 0 -> X == 0
+ // (srl (ctlz x), 5) == 1 -> X == 0
+ Cond = ISD::SETEQ;
+ }
+ SDValue Zero = DAG.getConstant(0, dl, N0.getValueType());
+ return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero,
+ Cond);
}
}
}
- }
+ }
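The ctlz fold works because, for a power-of-two bit width w, ctlz(x) can only reach w (and thus survive a logical shift right by log2(w)) when x is zero. A standalone check for w = 32 and shift amount 5 (ctlz32 is an illustrative helper; __builtin_clz is undefined at zero, so it is special-cased):

#include <cassert>
#include <cstdint>

unsigned ctlz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }

int main() {
  uint32_t samples[] = {0u, 1u, 2u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t x : samples) {
    // (srl (ctlz x), 5) == 0 -> X != 0
    assert(((ctlz32(x) >> 5) == 0) == (x != 0));
    // (srl (ctlz x), 5) == 1 -> X == 0
    assert(((ctlz32(x) >> 5) == 1) == (x == 0));
  }
}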
- // FIXME: Support vectors.
- if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
- const APInt &C1 = N1C->getAPIntValue();
+ // FIXME: Support vectors.
+ if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
+ const APInt &C1 = N1C->getAPIntValue();
// (zext x) == C --> x == (trunc C)
// (sext x) == C --> x == (trunc C)
@@ -3666,12 +3666,12 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
SDValue Ptr = Lod->getBasePtr();
if (bestOffset != 0)
- Ptr =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl);
- SDValue NewLoad =
- DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
- Lod->getPointerInfo().getWithOffset(bestOffset),
- Lod->getOriginalAlign());
+ Ptr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl);
+ SDValue NewLoad =
+ DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
+ Lod->getPointerInfo().getWithOffset(bestOffset),
+ Lod->getOriginalAlign());
return DAG.getSetCC(dl, VT,
DAG.getNode(ISD::AND, dl, newVT, NewLoad,
DAG.getConstant(bestMask.trunc(bestWidth),
@@ -3736,9 +3736,9 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
break; // todo, be more careful with signed comparisons
}
} else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
- OpVT)) {
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
+ OpVT)) {
EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
EVT ExtDstTy = N0.getValueType();
@@ -3747,18 +3747,18 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// If the constant doesn't fit into the number of bits for the source of
// the sign extension, it is impossible for both sides to be equal.
if (C1.getMinSignedBits() > ExtSrcTyBits)
- return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
+ return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
- assert(ExtDstTy == N0.getOperand(0).getValueType() &&
- ExtDstTy != ExtSrcTy && "Unexpected types!");
- APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
- SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0),
- DAG.getConstant(Imm, dl, ExtDstTy));
+ assert(ExtDstTy == N0.getOperand(0).getValueType() &&
+ ExtDstTy != ExtSrcTy && "Unexpected types!");
+ APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
+ SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0),
+ DAG.getConstant(Imm, dl, ExtDstTy));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(ZextOp.getNode());
// Otherwise, make this a use of a zext.
return DAG.getSetCC(dl, VT, ZextOp,
- DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond);
+ DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond);
} else if ((N1C->isNullValue() || N1C->isOne()) &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
// SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
@@ -3782,7 +3782,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
(N0.getOpcode() == ISD::AND &&
N0.getOperand(0).getOpcode() == ISD::XOR &&
N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
- isOneConstant(N0.getOperand(1))) {
+ isOneConstant(N0.getOperand(1))) {
// If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
// can only do this if the top bits are known zero.
unsigned BitWidth = N0.getValueSizeInBits();
@@ -3826,7 +3826,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond);
}
}
- if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) {
+ if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) {
// If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
if (Op0.getValueType().bitsGT(VT))
Op0 = DAG.getNode(ISD::AND, dl, VT,
@@ -3964,67 +3964,67 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift(
VT, N0, N1, Cond, DCI, dl))
return CC;
-
- // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y).
- // For example, when high 32-bits of i64 X are known clear:
- // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
- // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
- bool CmpZero = N1C->getAPIntValue().isNullValue();
- bool CmpNegOne = N1C->getAPIntValue().isAllOnesValue();
- if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
- // Match or(lo,shl(hi,bw/2)) pattern.
- auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
- unsigned EltBits = V.getScalarValueSizeInBits();
- if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
- return false;
- SDValue LHS = V.getOperand(0);
- SDValue RHS = V.getOperand(1);
- APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
- // Unshifted element must have zero upperbits.
- // Unshifted element must have zero upper bits.
- isa<ConstantSDNode>(RHS.getOperand(1)) &&
- RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
- DAG.MaskedValueIsZero(LHS, HiBits)) {
- Lo = LHS;
- Hi = RHS.getOperand(0);
- return true;
- }
- if (LHS.getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(LHS.getOperand(1)) &&
- LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
- DAG.MaskedValueIsZero(RHS, HiBits)) {
- Lo = RHS;
- Hi = LHS.getOperand(0);
- return true;
- }
- return false;
- };
-
- auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
- unsigned EltBits = N0.getScalarValueSizeInBits();
- unsigned HalfBits = EltBits / 2;
- APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
- SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
- SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
- SDValue NewN0 =
- DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
- SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
- return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
- };
-
- SDValue Lo, Hi;
- if (IsConcat(N0, Lo, Hi))
- return MergeConcat(Lo, Hi);
-
- if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
- SDValue Lo0, Lo1, Hi0, Hi1;
- if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
- IsConcat(N0.getOperand(1), Lo1, Hi1)) {
- return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
- DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
- }
- }
- }
+
+ // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y).
+ // For example, when high 32-bits of i64 X are known clear:
+ // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
+ // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
+ bool CmpZero = N1C->getAPIntValue().isNullValue();
+ bool CmpNegOne = N1C->getAPIntValue().isAllOnesValue();
+ if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
+ // Match or(lo,shl(hi,bw/2)) pattern.
+ auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
+ unsigned EltBits = V.getScalarValueSizeInBits();
+ if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
+ return false;
+ SDValue LHS = V.getOperand(0);
+ SDValue RHS = V.getOperand(1);
+ APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
+ // Unshifted element must have zero upper bits.
+ if (RHS.getOpcode() == ISD::SHL &&
+ isa<ConstantSDNode>(RHS.getOperand(1)) &&
+ RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
+ DAG.MaskedValueIsZero(LHS, HiBits)) {
+ Lo = LHS;
+ Hi = RHS.getOperand(0);
+ return true;
+ }
+ if (LHS.getOpcode() == ISD::SHL &&
+ isa<ConstantSDNode>(LHS.getOperand(1)) &&
+ LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
+ DAG.MaskedValueIsZero(RHS, HiBits)) {
+ Lo = RHS;
+ Hi = LHS.getOperand(0);
+ return true;
+ }
+ return false;
+ };
+
+ auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
+ unsigned EltBits = N0.getScalarValueSizeInBits();
+ unsigned HalfBits = EltBits / 2;
+ APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
+ SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
+ SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
+ SDValue NewN0 =
+ DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
+ SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
+ return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
+ };
+
+ SDValue Lo, Hi;
+ if (IsConcat(N0, Lo, Hi))
+ return MergeConcat(Lo, Hi);
+
+ if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
+ SDValue Lo0, Lo1, Hi0, Hi1;
+ if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
+ IsConcat(N0.getOperand(1), Lo1, Hi1)) {
+ return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
+ DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
+ }
+ }
+ }
}
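The all-bits-clear half of this fold is easy to check standalone: when the high 32 bits of X are already known zero, the concatenated value X | (Y << 32) is zero exactly when X | Y is. A small brute-force model at i64:

#include <cassert>
#include <cstdint>

int main() {
  // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0,
  // valid because the high 32 bits of x are known clear here.
  for (uint64_t x = 0; x < 256; ++x)   // x stays below 2^32 by construction
    for (uint64_t y = 0; y < 256; ++y)
      assert(((x | (y << 32)) == 0) == ((x | y) == 0));
}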
// If we have "setcc X, C0", check to see if we can shrink the immediate
@@ -4032,20 +4032,20 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// TODO: Support this for vectors after legalize ops.
if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
// SETUGT X, SINTMAX -> SETLT X, 0
- // SETUGE X, SINTMIN -> SETLT X, 0
- if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
- (Cond == ISD::SETUGE && C1.isMinSignedValue()))
+ // SETUGE X, SINTMIN -> SETLT X, 0
+ if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
+ (Cond == ISD::SETUGE && C1.isMinSignedValue()))
return DAG.getSetCC(dl, VT, N0,
DAG.getConstant(0, dl, N1.getValueType()),
ISD::SETLT);
// SETULT X, SINTMIN -> SETGT X, -1
- // SETULE X, SINTMAX -> SETGT X, -1
- if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
- (Cond == ISD::SETULE && C1.isMaxSignedValue()))
- return DAG.getSetCC(dl, VT, N0,
- DAG.getAllOnesConstant(dl, N1.getValueType()),
- ISD::SETGT);
+ // SETULE X, SINTMAX -> SETGT X, -1
+ if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
+ (Cond == ISD::SETULE && C1.isMaxSignedValue()))
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getAllOnesConstant(dl, N1.getValueType()),
+ ISD::SETGT);
}
}
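These four rewrites are the usual unsigned-to-signed boundary tricks: an unsigned comparison against SINTMAX/SINTMIN is really a test of the sign bit. A standalone check at 32 bits (the int32_t cast assumes two's complement, which C++20 guarantees):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t samples[] = {0u, 1u, 0x7FFFFFFEu, 0x7FFFFFFFu,
                        0x80000000u, 0x80000001u, 0xFFFFFFFFu};
  for (uint32_t x : samples) {
    // SETUGT X, SINTMAX -> SETLT X, 0
    assert((x > 0x7FFFFFFFu) == ((int32_t)x < 0));
    // SETULT X, SINTMIN -> SETGT X, -1
    assert((x < 0x80000000u) == ((int32_t)x > -1));
  }
}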
@@ -4056,13 +4056,13 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
const APInt &C1 = N1C->getAPIntValue();
EVT ShValTy = N0.getValueType();
- // Fold bit comparisons when we can. This will result in an
- // incorrect value when boolean false is negative one, unless
- // the bitsize is 1 in which case the false value is the same
- // in practice regardless of the representation.
- if ((VT.getSizeInBits() == 1 ||
- getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) &&
- (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ // Fold bit comparisons when we can. This will result in an
+ // incorrect value when boolean false is negative one, unless
+ // the bitsize is 1 in which case the false value is the same
+ // in practice regardless of the representation.
+ if ((VT.getSizeInBits() == 1 ||
+ getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) &&
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
(VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
N0.getOpcode() == ISD::AND) {
if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
@@ -4458,8 +4458,8 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
}
SDValue TargetLowering::LowerAsmOutputForConstraint(
- SDValue &Chain, SDValue &Flag, const SDLoc &DL,
- const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
+ SDValue &Chain, SDValue &Flag, const SDLoc &DL,
+ const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
return SDValue();
}
@@ -5033,15 +5033,15 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
return SDValue();
SDValue Shift, Factor;
- if (VT.isFixedLengthVector()) {
+ if (VT.isFixedLengthVector()) {
Shift = DAG.getBuildVector(ShVT, dl, Shifts);
Factor = DAG.getBuildVector(VT, dl, Factors);
- } else if (VT.isScalableVector()) {
- assert(Shifts.size() == 1 && Factors.size() == 1 &&
- "Expected matchUnaryPredicate to return one element for scalable "
- "vectors");
- Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
- Factor = DAG.getSplatVector(VT, dl, Factors[0]);
+ } else if (VT.isScalableVector()) {
+ assert(Shifts.size() == 1 && Factors.size() == 1 &&
+ "Expected matchUnaryPredicate to return one element for scalable "
+ "vectors");
+ Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
+ Factor = DAG.getSplatVector(VT, dl, Factors[0]);
} else {
Shift = Shifts[0];
Factor = Factors[0];
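BuildExactSDIV turns an exact division into a shift (for the power-of-two part of the divisor) followed by a multiply with the modular inverse of the odd factor. A standalone sketch for division by 6 on i32 (exactDiv6 is an illustrative name; it is only valid when x is an exact multiple of 6):

#include <cassert>
#include <cstdint>

// 6 = 2 * 3: shift out the 2, then multiply by the inverse of 3 mod 2^32.
// 3 * 0xAAAAAAAB == 1 (mod 2^32), so the multiply undoes the factor of 3.
uint32_t exactDiv6(uint32_t x) { return (x >> 1) * 0xAAAAAAABu; }

int main() {
  for (uint32_t q = 0; q < 1u << 16; ++q)
    assert(exactDiv6(q * 6) == q);
}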
@@ -5134,20 +5134,20 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue MagicFactor, Factor, Shift, ShiftMask;
- if (VT.isFixedLengthVector()) {
+ if (VT.isFixedLengthVector()) {
MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
Factor = DAG.getBuildVector(VT, dl, Factors);
Shift = DAG.getBuildVector(ShVT, dl, Shifts);
ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
- } else if (VT.isScalableVector()) {
- assert(MagicFactors.size() == 1 && Factors.size() == 1 &&
- Shifts.size() == 1 && ShiftMasks.size() == 1 &&
- "Expected matchUnaryPredicate to return one element for scalable "
- "vectors");
- MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
- Factor = DAG.getSplatVector(VT, dl, Factors[0]);
- Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
- ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]);
+ } else if (VT.isScalableVector()) {
+ assert(MagicFactors.size() == 1 && Factors.size() == 1 &&
+ Shifts.size() == 1 && ShiftMasks.size() == 1 &&
+ "Expected matchUnaryPredicate to return one element for scalable "
+ "vectors");
+ MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
+ Factor = DAG.getSplatVector(VT, dl, Factors[0]);
+ Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
+ ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]);
} else {
MagicFactor = MagicFactors[0];
Factor = Factors[0];
@@ -5261,19 +5261,19 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue PreShift, PostShift, MagicFactor, NPQFactor;
- if (VT.isFixedLengthVector()) {
+ if (VT.isFixedLengthVector()) {
PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
- } else if (VT.isScalableVector()) {
- assert(PreShifts.size() == 1 && MagicFactors.size() == 1 &&
- NPQFactors.size() == 1 && PostShifts.size() == 1 &&
- "Expected matchUnaryPredicate to return one for scalable vectors");
- PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]);
- MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
- NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]);
- PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]);
+ } else if (VT.isScalableVector()) {
+ assert(PreShifts.size() == 1 && MagicFactors.size() == 1 &&
+ NPQFactors.size() == 1 && PostShifts.size() == 1 &&
+ "Expected matchUnaryPredicate to return one for scalable vectors");
+ PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]);
+ MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
+ NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]);
+ PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]);
} else {
PreShift = PreShifts[0];
MagicFactor = MagicFactors[0];
@@ -5325,10 +5325,10 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
Created.push_back(Q.getNode());
- EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
-
+ EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+
SDValue One = DAG.getConstant(1, dl, VT);
- SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ);
+ SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ);
return DAG.getSelect(dl, VT, IsOne, N0, Q);
}
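The magic-number path that BuildUDIV emits replaces the division with a widening multiply and a shift. For example, dividing an i32 by 3 multiplies by ceil(2^33 / 3) and keeps the top bits; a standalone check (udiv3 is an illustrative name):

#include <cassert>
#include <cstdint>

uint32_t udiv3(uint32_t x) {
  // 0xAAAAAAAB == ceil(2^33 / 3); the widening multiply plus shift
  // computes floor(x / 3) for every 32-bit x.
  return (uint32_t)(((uint64_t)x * 0xAAAAAAABull) >> 33);
}

int main() {
  uint32_t samples[] = {0u, 1u, 2u, 3u, 4u, 5u, 6u,
                        0x7FFFFFFFu, 0xFFFFFFFEu, 0xFFFFFFFFu};
  for (uint32_t x : samples)
    assert(udiv3(x) == x / 3);
}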
@@ -5755,7 +5755,7 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
return SDValue();
SDValue PVal, AVal, KVal, QVal;
- if (VT.isFixedLengthVector()) {
+ if (VT.isFixedLengthVector()) {
if (HadOneDivisor) {
// Try to turn PAmts into a splat, since we don't care about the values
// that are currently '0'. If we can't, just keep '0'`s.
@@ -5774,15 +5774,15 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
AVal = DAG.getBuildVector(VT, DL, AAmts);
KVal = DAG.getBuildVector(ShVT, DL, KAmts);
QVal = DAG.getBuildVector(VT, DL, QAmts);
- } else if (VT.isScalableVector()) {
- assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 &&
- QAmts.size() == 1 &&
- "Expected matchUnaryPredicate to return one element for scalable "
- "vectors");
- PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
- AVal = DAG.getSplatVector(VT, DL, AAmts[0]);
- KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
- QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
+ } else if (VT.isScalableVector()) {
+ assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 &&
+ QAmts.size() == 1 &&
+ "Expected matchUnaryPredicate to return one element for scalable "
+ "vectors");
+ PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
+ AVal = DAG.getSplatVector(VT, DL, AAmts[0]);
+ KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
+ QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
} else {
PVal = PAmts[0];
AVal = AAmts[0];
@@ -5877,28 +5877,28 @@ verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
return false;
}
-SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
- const DenormalMode &Mode) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
- SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
- // Test the input for denormals, which would give a wrong estimate.
- if (Mode.Input == DenormalMode::IEEE) {
- // This is specifically a check for the handling of denormal inputs,
- // not the result.
-
- // Test = fabs(X) < SmallestNormal
- const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
- APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
- SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
- SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
- return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
- }
- // Test = X == 0.0
- return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
-}
-
+SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
+ const DenormalMode &Mode) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+ SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
+ // Test the input for denormals, which would give a wrong estimate.
+ if (Mode.Input == DenormalMode::IEEE) {
+ // This is specifically a check for the handling of denormal inputs,
+ // not the result.
+
+ // Test = fabs(X) < SmallestNormal
+ const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
+ APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
+ SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
+ SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
+ return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
+ }
+ // Test = X == 0.0
+ return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
+}
+
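In scalar terms, the IEEE-denormal branch of getSqrtInputTest selects the fallback value whenever |x| falls below the smallest normalized number, while the non-IEEE branch only catches exact zero. A scalar model of the IEEE test (needsZeroPath is an illustrative name):

#include <cassert>
#include <cmath>
#include <limits>

// Test = fabs(X) < SmallestNormal
bool needsZeroPath(float x) {
  return std::fabs(x) < std::numeric_limits<float>::min(); // smallest normal
}

int main() {
  assert(needsZeroPath(0.0f));
  assert(needsZeroPath(-0.0f));
  assert(needsZeroPath(std::numeric_limits<float>::denorm_min()));
  assert(!needsZeroPath(std::numeric_limits<float>::min()));
  assert(!needsZeroPath(1.0f));
}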
SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
bool LegalOps, bool OptForSize,
NegatibleCost &Cost,
@@ -5935,11 +5935,11 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
SDLoc DL(Op);
- // Because getNegatedExpression can delete nodes we need a handle to keep
- // temporary nodes alive in case the recursion manages to create an identical
- // node.
- std::list<HandleSDNode> Handles;
-
+ // Because getNegatedExpression can delete nodes we need a handle to keep
+ // temporary nodes alive in case the recursion manages to create an identical
+ // node.
+ std::list<HandleSDNode> Handles;
+
switch (Opcode) {
case ISD::ConstantFP: {
// Don't invert constant FP values after legalization unless the target says
@@ -6008,18 +6008,18 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
NegatibleCost CostX = NegatibleCost::Expensive;
SDValue NegX =
getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
- // Prevent this node from being deleted by the next call.
- if (NegX)
- Handles.emplace_back(NegX);
-
+ // Prevent this node from being deleted by the next call.
+ if (NegX)
+ Handles.emplace_back(NegX);
+
// fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X)
NegatibleCost CostY = NegatibleCost::Expensive;
SDValue NegY =
getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
- // We're done with the handles.
- Handles.clear();
-
+ // We're done with the handles.
+ Handles.clear();
+
// Negate the X if its cost is less or equal than Y.
if (NegX && (CostX <= CostY)) {
Cost = CostX;
@@ -6064,18 +6064,18 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
NegatibleCost CostX = NegatibleCost::Expensive;
SDValue NegX =
getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
- // Prevent this node from being deleted by the next call.
- if (NegX)
- Handles.emplace_back(NegX);
-
+ // Prevent this node from being deleted by the next call.
+ if (NegX)
+ Handles.emplace_back(NegX);
+
// fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
NegatibleCost CostY = NegatibleCost::Expensive;
SDValue NegY =
getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
- // We're done with the handles.
- Handles.clear();
-
+ // We're done with the handles.
+ Handles.clear();
+
// Negate the X if its cost is less or equal than Y.
if (NegX && (CostX <= CostY)) {
Cost = CostX;
@@ -6113,25 +6113,25 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
if (!NegZ)
break;
- // Prevent this node from being deleted by the next two calls.
- Handles.emplace_back(NegZ);
-
+ // Prevent this node from being deleted by the next two calls.
+ Handles.emplace_back(NegZ);
+
// fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
NegatibleCost CostX = NegatibleCost::Expensive;
SDValue NegX =
getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
- // Prevent this node from being deleted by the next call.
- if (NegX)
- Handles.emplace_back(NegX);
-
+ // Prevent this node from being deleted by the next call.
+ if (NegX)
+ Handles.emplace_back(NegX);
+
// fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
NegatibleCost CostY = NegatibleCost::Expensive;
SDValue NegY =
getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
- // We're done with the handles.
- Handles.clear();
-
+ // We're done with the handles.
+ Handles.clear();
+
// Negate the X if its cost is less or equal than Y.
if (NegX && (CostX <= CostY)) {
Cost = std::min(CostX, CostZ);
@@ -6172,7 +6172,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
// Legalization Utilities
//===----------------------------------------------------------------------===//
-bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
+bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
SDValue LHS, SDValue RHS,
SmallVectorImpl<SDValue> &Result,
EVT HiLoVT, SelectionDAG &DAG,
@@ -6243,9 +6243,9 @@ bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
}
}
- if (!VT.isVector() && Opcode == ISD::MUL &&
- DAG.ComputeNumSignBits(LHS) > InnerBitSize &&
- DAG.ComputeNumSignBits(RHS) > InnerBitSize) {
+ if (!VT.isVector() && Opcode == ISD::MUL &&
+ DAG.ComputeNumSignBits(LHS) > InnerBitSize &&
+ DAG.ComputeNumSignBits(RHS) > InnerBitSize) {
// The input values are both sign-extended.
// TODO: non-MUL case?
if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
@@ -6359,7 +6359,7 @@ bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
SDValue LL, SDValue LH, SDValue RL,
SDValue RH) const {
SmallVector<SDValue, 2> Result;
- bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N),
+ bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N),
N->getOperand(0), N->getOperand(1), Result, HiLoVT,
DAG, Kind, LL, LH, RL, RH);
if (Ok) {
@@ -6371,7 +6371,7 @@ bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
}
// Check that (every element of) Z is undef or not an exact multiple of BW.
-static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) {
+static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) {
return ISD::matchUnaryPredicate(
Z,
[=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; },
@@ -6398,35 +6398,35 @@ bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
EVT ShVT = Z.getValueType();
- // If a funnel shift in the other direction is more supported, use it.
- unsigned RevOpcode = IsFSHL ? ISD::FSHR : ISD::FSHL;
- if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
- isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) {
- if (isNonZeroModBitWidthOrUndef(Z, BW)) {
- // fshl X, Y, Z -> fshr X, Y, -Z
- // fshr X, Y, Z -> fshl X, Y, -Z
- SDValue Zero = DAG.getConstant(0, DL, ShVT);
- Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z);
- } else {
- // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
- // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
- SDValue One = DAG.getConstant(1, DL, ShVT);
- if (IsFSHL) {
- Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
- X = DAG.getNode(ISD::SRL, DL, VT, X, One);
- } else {
- X = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
- Y = DAG.getNode(ISD::SHL, DL, VT, Y, One);
- }
- Z = DAG.getNOT(DL, Z, ShVT);
- }
- Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Z);
- return true;
- }
-
+ // If a funnel shift in the other direction is more supported, use it.
+ unsigned RevOpcode = IsFSHL ? ISD::FSHR : ISD::FSHL;
+ if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
+ isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) {
+ if (isNonZeroModBitWidthOrUndef(Z, BW)) {
+ // fshl X, Y, Z -> fshr X, Y, -Z
+ // fshr X, Y, Z -> fshl X, Y, -Z
+ SDValue Zero = DAG.getConstant(0, DL, ShVT);
+ Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z);
+ } else {
+ // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
+ // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
+ SDValue One = DAG.getConstant(1, DL, ShVT);
+ if (IsFSHL) {
+ Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
+ X = DAG.getNode(ISD::SRL, DL, VT, X, One);
+ } else {
+ X = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
+ Y = DAG.getNode(ISD::SHL, DL, VT, Y, One);
+ }
+ Z = DAG.getNOT(DL, Z, ShVT);
+ }
+ Result = DAG.getNode(RevOpcode, DL, VT, X, Y, Z);
+ return true;
+ }
+
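The direction swap is sound because funnel-shift amounts are taken modulo the bit width, so fshl by Z equals fshr by -Z whenever Z is not a multiple of BW. A standalone check at 32 bits with reference implementations of both shifts (fshl/fshr here are illustrative helpers, not LLVM API):

#include <cassert>
#include <cstdint>

// Reference funnel shifts on a 32-bit pair (x:y), amount taken mod 32.
uint32_t fshl(uint32_t x, uint32_t y, unsigned z) {
  z &= 31;
  return z ? (x << z) | (y >> (32 - z)) : x;
}
uint32_t fshr(uint32_t x, uint32_t y, unsigned z) {
  z &= 31;
  return z ? (x << (32 - z)) | (y >> z) : y;
}

int main() {
  // fshl X, Y, Z -> fshr X, Y, -Z  (valid when Z % BW != 0)
  for (unsigned z = 1; z < 32; ++z)
    assert(fshl(0x12345678u, 0x9ABCDEF0u, z) ==
           fshr(0x12345678u, 0x9ABCDEF0u, (32 - z) & 31));
}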
SDValue ShX, ShY;
SDValue ShAmt, InvShAmt;
- if (isNonZeroModBitWidthOrUndef(Z, BW)) {
+ if (isNonZeroModBitWidthOrUndef(Z, BW)) {
// fshl: X << C | Y >> (BW - C)
// fshr: X << (BW - C) | Y >> C
// where C = Z % BW is not zero
@@ -6466,8 +6466,8 @@ bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result,
}
// TODO: Merge with expandFunnelShift.
-bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
- SDValue &Result, SelectionDAG &DAG) const {
+bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
+ SDValue &Result, SelectionDAG &DAG) const {
EVT VT = Node->getValueType(0);
unsigned EltSizeInBits = VT.getScalarSizeInBits();
bool IsLeft = Node->getOpcode() == ISD::ROTL;
@@ -6480,45 +6480,45 @@ bool TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
// If a rotate in the other direction is supported, use it.
unsigned RevRot = IsLeft ? ISD::ROTR : ISD::ROTL;
- if (isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) {
+ if (isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) {
SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
Result = DAG.getNode(RevRot, DL, VT, Op0, Sub);
return true;
}
- if (!AllowVectorOps && VT.isVector() &&
- (!isOperationLegalOrCustom(ISD::SHL, VT) ||
- !isOperationLegalOrCustom(ISD::SRL, VT) ||
- !isOperationLegalOrCustom(ISD::SUB, VT) ||
- !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
- !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
+ if (!AllowVectorOps && VT.isVector() &&
+ (!isOperationLegalOrCustom(ISD::SHL, VT) ||
+ !isOperationLegalOrCustom(ISD::SRL, VT) ||
+ !isOperationLegalOrCustom(ISD::SUB, VT) ||
+ !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
+ !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
return false;
unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
- SDValue ShVal;
- SDValue HsVal;
- if (isPowerOf2_32(EltSizeInBits)) {
- // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
- // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
- SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
- SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
- ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
- SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
- HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt);
- } else {
- // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
- // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
- SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
- SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC);
- ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
- SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt);
- SDValue One = DAG.getConstant(1, DL, ShVT);
- HsVal =
- DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt);
- }
- Result = DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal);
+ SDValue ShVal;
+ SDValue HsVal;
+ if (isPowerOf2_32(EltSizeInBits)) {
+ // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
+ // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
+ SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
+ SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
+ ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
+ SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
+ HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt);
+ } else {
+ // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
+ // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
+ SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
+ SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC);
+ ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
+ SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt);
+ SDValue One = DAG.getConstant(1, DL, ShVT);
+ HsVal =
+ DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt);
+ }
+ Result = DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal);
return true;
}
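For the power-of-two case this is the classic branch-free rotate; both shift amounts stay strictly below the width, so there is no undefined behavior even when c is a multiple of 32. A standalone check (rotlExpanded/rotlReference are illustrative names):

#include <cassert>
#include <cstdint>

// Power-of-two expansion from expandROT:
// (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
uint32_t rotlExpanded(uint32_t x, unsigned c) {
  return (x << (c & 31)) | (x >> (-c & 31));
}

uint32_t rotlReference(uint32_t x, unsigned c) {
  c &= 31;
  return c ? (x << c) | (x >> (32 - c)) : x;
}

int main() {
  for (unsigned c = 0; c < 64; ++c)
    assert(rotlExpanded(0xDEADBEEFu, c) == rotlReference(0xDEADBEEFu, c));
}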
@@ -6537,7 +6537,7 @@ bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
if (Node->isStrictFPOpcode())
// When a NaN is converted to an integer, a trap is allowed. We can't
// use this expansion here because it would eliminate that trap. Other
- // traps are also allowed and cannot be eliminated. See
+ // traps are also allowed and cannot be eliminated. See
// IEEE 754-2008 sec 5.8.
return false;
@@ -6608,7 +6608,7 @@ bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
// Only expand vector types if we have the appropriate vector bit operations.
- unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
+ unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
ISD::FP_TO_SINT;
if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
!isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
@@ -6623,19 +6623,19 @@ bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
if (APFloat::opOverflow &
APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
if (Node->isStrictFPOpcode()) {
- Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
- { Node->getOperand(0), Src });
+ Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
+ { Node->getOperand(0), Src });
Chain = Result.getValue(1);
} else
Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
return true;
}
- // Don't expand it if there isn't a cheap fsub instruction.
- if (!isOperationLegalOrCustom(
- Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
- return false;
-
+ // Don't expand it if there isn't a cheap fsub instruction.
+ if (!isOperationLegalOrCustom(
+ Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
+ return false;
+
SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
SDValue Sel;
@@ -6667,9 +6667,9 @@ bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
DAG.getConstant(SignMask, dl, DstVT));
SDValue SInt;
if (Node->isStrictFPOpcode()) {
- SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
+ SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
{ Chain, Src, FltOfs });
- SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
+ SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
{ Val.getValue(1), Val });
Chain = SInt.getValue(1);
} else {
@@ -6698,13 +6698,13 @@ bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
SDValue &Chain,
SelectionDAG &DAG) const {
- // This transform is not correct for converting 0 when rounding mode is set
- // to round toward negative infinity, which will produce -0.0. So disable under
- // strictfp.
- if (Node->isStrictFPOpcode())
- return false;
-
- SDValue Src = Node->getOperand(0);
+ // This transform is not correct for converting 0 when rounding mode is set
+ // to round toward negative infinity, which will produce -0.0. So disable under
+ // strictfp.
+ if (Node->isStrictFPOpcode())
+ return false;
+
+ SDValue Src = Node->getOperand(0);
EVT SrcVT = Src.getValueType();
EVT DstVT = Node->getValueType(0);
@@ -6723,10 +6723,10 @@ bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());
// Implementation of unsigned i64 to f64 following the algorithm in
- // __floatundidf in compiler_rt. This implementation performs rounding
- // correctly in all rounding modes with the exception of converting 0
- // when rounding toward negative infinity. In that case, the fsub will produce
- // -0.0. This will be added to +0.0 and produce -0.0, which is incorrect.
+ // __floatundidf in compiler_rt. This implementation performs rounding
+ // correctly in all rounding modes with the exception of converting 0
+ // when rounding toward negative infinity. In that case, the fsub will produce
+ // -0.0. This will be added to +0.0 and produce -0.0, which is incorrect.
SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT);
SDValue TwoP84PlusTwoP52 = DAG.getConstantFP(
BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT);
@@ -6740,9 +6740,9 @@ bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84);
SDValue LoFlt = DAG.getBitcast(DstVT, LoOr);
SDValue HiFlt = DAG.getBitcast(DstVT, HiOr);
- SDValue HiSub =
- DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
- Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
+ SDValue HiSub =
+ DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52);
+ Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub);
return true;
}
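The constants in this hunk encode the __floatundidf trick: OR the low and high 32-bit halves into the mantissas of 2^52 and 2^84, then let one FP subtract and one FP add reassemble the value and perform the rounding. A host-side sketch of the same computation under round-to-nearest (u64ToF64 is an illustrative name; the bit patterns match the constants in the code above):

#include <cassert>
#include <cstdint>
#include <cstring>

double u64ToF64(uint64_t x) {
  uint64_t loBits = (x & 0xFFFFFFFFu) | 0x4330000000000000u; // 2^52 + lo32
  uint64_t hiBits = (x >> 32) | 0x4530000000000000u;         // 2^84 + hi32*2^32
  double lo, hi, bias;
  std::memcpy(&lo, &loBits, 8);
  std::memcpy(&hi, &hiBits, 8);
  uint64_t c = 0x4530000000100000u; // 2^84 + 2^52 as a double
  std::memcpy(&bias, &c, 8);
  // Exact subtract removes the exponent bias; the final add rounds once.
  return (hi - bias) + lo;
}

int main() {
  uint64_t samples[] = {0u, 1u, 0xFFFFFFFFu, 0x100000000u,
                        0x123456789ABCDEFu, ~0ull};
  for (uint64_t x : samples)
    assert(u64ToF64(x) == (double)x);
}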
@@ -6752,11 +6752,11 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
EVT VT = Node->getValueType(0);
-
- if (VT.isScalableVector())
- report_fatal_error(
- "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
-
+
+ if (VT.isScalableVector())
+ report_fatal_error(
+ "Expanding fminnum/fmaxnum for scalable vectors is undefined.");
+
if (isOperationLegalOrCustom(NewOp, VT)) {
SDValue Quiet0 = Node->getOperand(0);
SDValue Quiet1 = Node->getOperand(1);
@@ -6980,58 +6980,58 @@ bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result,
}
bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
- SelectionDAG &DAG, bool IsNegative) const {
+ SelectionDAG &DAG, bool IsNegative) const {
SDLoc dl(N);
EVT VT = N->getValueType(0);
EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
SDValue Op = N->getOperand(0);
- // abs(x) -> smax(x,sub(0,x))
- if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
- isOperationLegal(ISD::SMAX, VT)) {
- SDValue Zero = DAG.getConstant(0, dl, VT);
- Result = DAG.getNode(ISD::SMAX, dl, VT, Op,
- DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
- return true;
- }
-
- // abs(x) -> umin(x,sub(0,x))
- if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
- isOperationLegal(ISD::UMIN, VT)) {
- SDValue Zero = DAG.getConstant(0, dl, VT);
- Result = DAG.getNode(ISD::UMIN, dl, VT, Op,
- DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
- return true;
- }
-
- // 0 - abs(x) -> smin(x, sub(0,x))
- if (IsNegative && isOperationLegal(ISD::SUB, VT) &&
- isOperationLegal(ISD::SMIN, VT)) {
- SDValue Zero = DAG.getConstant(0, dl, VT);
- Result = DAG.getNode(ISD::SMIN, dl, VT, Op,
- DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
- return true;
- }
-
+ // abs(x) -> smax(x,sub(0,x))
+ if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
+ isOperationLegal(ISD::SMAX, VT)) {
+ SDValue Zero = DAG.getConstant(0, dl, VT);
+ Result = DAG.getNode(ISD::SMAX, dl, VT, Op,
+ DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
+ return true;
+ }
+
+ // abs(x) -> umin(x,sub(0,x))
+ if (!IsNegative && isOperationLegal(ISD::SUB, VT) &&
+ isOperationLegal(ISD::UMIN, VT)) {
+ SDValue Zero = DAG.getConstant(0, dl, VT);
+ Result = DAG.getNode(ISD::UMIN, dl, VT, Op,
+ DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
+ return true;
+ }
+
+ // 0 - abs(x) -> smin(x, sub(0,x))
+ if (IsNegative && isOperationLegal(ISD::SUB, VT) &&
+ isOperationLegal(ISD::SMIN, VT)) {
+ SDValue Zero = DAG.getConstant(0, dl, VT);
+ Result = DAG.getNode(ISD::SMIN, dl, VT, Op,
+ DAG.getNode(ISD::SUB, dl, VT, Zero, Op));
+ return true;
+ }
+
// Only expand vector types if we have the appropriate vector operations.
- if (VT.isVector() &&
- (!isOperationLegalOrCustom(ISD::SRA, VT) ||
- (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) ||
- (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) ||
- !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
+ if (VT.isVector() &&
+ (!isOperationLegalOrCustom(ISD::SRA, VT) ||
+ (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) ||
+ (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) ||
+ !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
return false;
SDValue Shift =
DAG.getNode(ISD::SRA, dl, VT, Op,
DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT));
- if (!IsNegative) {
- SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
- Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
- } else {
- // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y))
- SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift);
- Result = DAG.getNode(ISD::SUB, dl, VT, Shift, Xor);
- }
+ if (!IsNegative) {
+ SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift);
+ Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift);
+ } else {
+ // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y))
+ SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift);
+ Result = DAG.getNode(ISD::SUB, dl, VT, Shift, Xor);
+ }
return true;
}
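The generic path computes abs with a sign splat: Shift = x >>s (w-1) is 0 or -1, and (x + Shift) ^ Shift conditionally negates. A scalar check (absExpanded is an illustrative name; x >> 31 is assumed to be an arithmetic shift, and INT32_MIN is excluded since its negation overflows):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int32_t absExpanded(int32_t x) {
  int32_t s = x >> 31; // 0 or -1; arithmetic shift assumed
  return (x + s) ^ s;  // negative: ~(x - 1) == -x; non-negative: unchanged
}

int main() {
  int32_t samples[] = {0, 1, -1, 42, -42, INT32_MAX, INT32_MIN + 1};
  for (int32_t x : samples)
    assert(absExpanded(x) == std::abs(x));
}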
@@ -7045,9 +7045,9 @@ TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
EVT DstVT = LD->getValueType(0);
ISD::LoadExtType ExtType = LD->getExtensionType();
- if (SrcVT.isScalableVector())
- report_fatal_error("Cannot scalarize scalable vector loads");
-
+ if (SrcVT.isScalableVector())
+ report_fatal_error("Cannot scalarize scalable vector loads");
+
unsigned NumElem = SrcVT.getVectorNumElements();
EVT SrcEltVT = SrcVT.getScalarType();
@@ -7074,7 +7074,7 @@ TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
// the codegen worse.
SDValue Load =
DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR,
- LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
+ LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(), LD->getAAInfo());
SmallVector<SDValue, 8> Vals;
@@ -7111,10 +7111,10 @@ TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
SDValue ScalarLoad =
DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR,
LD->getPointerInfo().getWithOffset(Idx * Stride),
- SrcEltVT, LD->getOriginalAlign(),
+ SrcEltVT, LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(), LD->getAAInfo());
- BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
+ BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
Vals.push_back(ScalarLoad.getValue(0));
LoadChains.push_back(ScalarLoad.getValue(1));
@@ -7135,9 +7135,9 @@ SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
SDValue Value = ST->getValue();
EVT StVT = ST->getMemoryVT();
- if (StVT.isScalableVector())
- report_fatal_error("Cannot scalarize scalable vector stores");
-
+ if (StVT.isScalableVector())
+ report_fatal_error("Cannot scalarize scalable vector stores");
+
// The type of the data we want to save
EVT RegVT = Value.getValueType();
EVT RegSclVT = RegVT.getScalarType();
@@ -7174,7 +7174,7 @@ SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
}
return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(),
- ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
+ ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
ST->getAAInfo());
}
@@ -7188,14 +7188,14 @@ SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value,
DAG.getVectorIdxConstant(Idx, SL));
- SDValue Ptr =
- DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
+ SDValue Ptr =
+ DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
// This scalar TruncStore may be illegal, but we legalize it later.
SDValue Store = DAG.getTruncStore(
Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride),
- MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
- ST->getAAInfo());
+ MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(),
+ ST->getAAInfo());
Stores.push_back(Store);
}
@@ -7260,7 +7260,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
// Load one integer register's worth from the original location.
SDValue Load = DAG.getLoad(
RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
- LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
+ LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
LD->getAAInfo());
// Follow the load with a store to the stack slot. Remember the store.
Stores.push_back(DAG.getStore(
@@ -7279,8 +7279,8 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
SDValue Load =
DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(Offset), MemVT,
- LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
- LD->getAAInfo());
+ LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
+ LD->getAAInfo());
// Follow the load with a store to the stack slot. Remember the store.
// On big-endian machines this requires a truncating store to ensure
// that the bits end up in the right place.
@@ -7310,7 +7310,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
NumBits >>= 1;
- Align Alignment = LD->getOriginalAlign();
+ Align Alignment = LD->getOriginalAlign();
unsigned IncrementSize = NumBits / 8;
ISD::LoadExtType HiExtType = LD->getExtensionType();
@@ -7325,21 +7325,21 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
LD->getAAInfo());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
- NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
- LD->getAAInfo());
+ NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
+ LD->getAAInfo());
} else {
Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
LD->getAAInfo());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
- NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
- LD->getAAInfo());
+ NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
+ LD->getAAInfo());
}
// aggregate the two parts
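
The aggregation the comment refers to combines the two half-width loads with a shift and an or. A minimal sketch of that combine step for a little-endian 32-bit value (memcpy stands in for the two narrower loads; not the LLVM code itself):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Hypothetical sketch: assemble a 32-bit value from two 16-bit halves,
  // zero-extending the low half and shifting the high half into place.
  uint32_t split_unaligned_load32(const unsigned char *p) {
    uint16_t lo, hi;
    std::memcpy(&lo, p, sizeof(lo));              // low half at Ptr
    std::memcpy(&hi, p + sizeof(lo), sizeof(hi)); // high half at Ptr + IncrementSize
    return (uint32_t(hi) << 16) | uint32_t(lo);   // aggregate the two parts
  }

  int main() {
    unsigned char buf[6] = {0, 0x78, 0x56, 0x34, 0x12, 0};
    std::printf("0x%08x\n", split_unaligned_load32(buf + 1)); // 0x12345678
  }
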
@@ -7363,7 +7363,7 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
SDValue Ptr = ST->getBasePtr();
SDValue Val = ST->getValue();
EVT VT = Val.getValueType();
- Align Alignment = ST->getOriginalAlign();
+ Align Alignment = ST->getOriginalAlign();
auto &MF = DAG.getMachineFunction();
EVT StoreMemVT = ST->getMemoryVT();
@@ -7420,7 +7420,7 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
// Store it to the final location. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
ST->getPointerInfo().getWithOffset(Offset),
- ST->getOriginalAlign(),
+ ST->getOriginalAlign(),
ST->getMemOperand()->getFlags()));
// Increment the pointers.
Offset += RegBytes;
@@ -7442,7 +7442,7 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
Stores.push_back(
DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
- ST->getOriginalAlign(),
+ ST->getOriginalAlign(),
ST->getMemOperand()->getFlags(), ST->getAAInfo()));
// The order of the stores doesn't matter - say it with a TokenFactor.
SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
@@ -7453,8 +7453,8 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
"Unaligned store of unknown type.");
// Get the half-size VT
EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
- unsigned NumBits = NewStoredVT.getFixedSizeInBits();
- unsigned IncrementSize = NumBits / 8;
+ unsigned NumBits = NewStoredVT.getFixedSizeInBits();
+ unsigned IncrementSize = NumBits / 8;
// Divide the stored value in two parts.
SDValue ShiftAmount = DAG.getConstant(
@@ -7469,7 +7469,7 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
ST->getMemOperand()->getFlags());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
Store2 = DAG.getTruncStore(
Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
@@ -7488,12 +7488,12 @@ TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
SDValue Increment;
EVT AddrVT = Addr.getValueType();
EVT MaskVT = Mask.getValueType();
- assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
+ assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
"Incompatible types of Data and Mask");
if (IsCompressedMemory) {
- if (DataVT.isScalableVector())
- report_fatal_error(
- "Cannot currently handle compressed memory with scalable vectors");
+ if (DataVT.isScalableVector())
+ report_fatal_error(
+ "Cannot currently handle compressed memory with scalable vectors");
// Incrementing the pointer according to number of '1's in the mask.
EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
@@ -7509,10 +7509,10 @@ TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
AddrVT);
Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
- } else if (DataVT.isScalableVector()) {
- Increment = DAG.getVScale(DL, AddrVT,
- APInt(AddrVT.getFixedSizeInBits(),
- DataVT.getStoreSize().getKnownMinSize()));
+ } else if (DataVT.isScalableVector()) {
+ Increment = DAG.getVScale(DL, AddrVT,
+ APInt(AddrVT.getFixedSizeInBits(),
+ DataVT.getStoreSize().getKnownMinSize()));
} else
Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
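
For compressed memory the code above advances the pointer by (number of set mask bits) * (element size), computed via the CTPOP and MUL nodes. A minimal C++20 sketch of that arithmetic (hypothetical helper, not the LLVM API):

  #include <bit>
  #include <cstdint>
  #include <cstdio>

  // Hypothetical sketch: a compressed store writes only the active lanes,
  // so the address advances by popcount(mask) * element size.
  uintptr_t increment_compressed(uintptr_t addr, uint32_t mask, unsigned elem_bytes) {
    return addr + std::popcount(mask) * uintptr_t(elem_bytes);
  }

  int main() {
    // Three active lanes of four bytes each -> advance by 12.
    std::printf("%zu\n", size_t(increment_compressed(0, 0b1011u, 4)));
  }
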
@@ -7523,26 +7523,26 @@ static SDValue clampDynamicVectorIndex(SelectionDAG &DAG,
SDValue Idx,
EVT VecVT,
const SDLoc &dl) {
- if (!VecVT.isScalableVector() && isa<ConstantSDNode>(Idx))
+ if (!VecVT.isScalableVector() && isa<ConstantSDNode>(Idx))
return Idx;
EVT IdxVT = Idx.getValueType();
- unsigned NElts = VecVT.getVectorMinNumElements();
- if (VecVT.isScalableVector()) {
- SDValue VS = DAG.getVScale(dl, IdxVT,
- APInt(IdxVT.getFixedSizeInBits(),
- NElts));
- SDValue Sub = DAG.getNode(ISD::SUB, dl, IdxVT, VS,
- DAG.getConstant(1, dl, IdxVT));
-
- return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
- } else {
- if (isPowerOf2_32(NElts)) {
- APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
- Log2_32(NElts));
- return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
- DAG.getConstant(Imm, dl, IdxVT));
- }
+ unsigned NElts = VecVT.getVectorMinNumElements();
+ if (VecVT.isScalableVector()) {
+ SDValue VS = DAG.getVScale(dl, IdxVT,
+ APInt(IdxVT.getFixedSizeInBits(),
+ NElts));
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, IdxVT, VS,
+ DAG.getConstant(1, dl, IdxVT));
+
+ return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
+ } else {
+ if (isPowerOf2_32(NElts)) {
+ APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(),
+ Log2_32(NElts));
+ return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
+ DAG.getConstant(Imm, dl, IdxVT));
+ }
}
return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
@@ -7559,8 +7559,8 @@ SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
EVT EltVT = VecVT.getVectorElementType();
// Calculate the element offset and add it to the pointer.
- unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
- assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
+ unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
+ assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
"Converting bits to bytes lost precision");
Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
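
getVectorElementPointer thus clamps the dynamic index and then adds Index * EltSize bytes to the base. A hedged sketch of the fixed-width path (the scalable path clamps with vscale-based UMIN instead; names illustrative):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  // Hypothetical sketch: clamp a dynamic index to the last valid element
  // (the power-of-two case can use an AND mask instead), then compute
  // base + Index * EltSize.
  uintptr_t element_pointer(uintptr_t base, uint64_t idx, unsigned nelts,
                            unsigned elt_bytes) {
    uint64_t clamped = std::min<uint64_t>(idx, nelts - 1); // clampDynamicVectorIndex
    return base + clamped * elt_bytes;
  }

  int main() {
    // Out-of-range index 9 on an 8-element vector clamps to lane 7.
    std::printf("%zu\n", size_t(element_pointer(0x1000, 9, 8, 4))); // 4124
  }
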
@@ -7638,65 +7638,65 @@ SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op,
return SDValue();
}
-// Convert redundant addressing modes (e.g. scaling is redundant
-// when accessing bytes).
-ISD::MemIndexType
-TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
- SDValue Offsets) const {
- bool IsScaledIndex =
- (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED);
- bool IsSignedIndex =
- (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED);
-
- // Scaling is unimportant for bytes, canonicalize to unscaled.
- if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) {
- IsScaledIndex = false;
- IndexType = IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED;
- }
-
- return IndexType;
-}
-
-SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
- SDValue Op0 = Node->getOperand(0);
- SDValue Op1 = Node->getOperand(1);
- EVT VT = Op0.getValueType();
- unsigned Opcode = Node->getOpcode();
- SDLoc DL(Node);
-
- // umin(x,y) -> sub(x,usubsat(x,y))
- if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) &&
- isOperationLegal(ISD::USUBSAT, VT)) {
- return DAG.getNode(ISD::SUB, DL, VT, Op0,
- DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1));
- }
-
- // umax(x,y) -> add(x,usubsat(y,x))
- if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) &&
- isOperationLegal(ISD::USUBSAT, VT)) {
- return DAG.getNode(ISD::ADD, DL, VT, Op0,
- DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0));
- }
-
- // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B
- ISD::CondCode CC;
- switch (Opcode) {
- default: llvm_unreachable("How did we get here?");
- case ISD::SMAX: CC = ISD::SETGT; break;
- case ISD::SMIN: CC = ISD::SETLT; break;
- case ISD::UMAX: CC = ISD::SETUGT; break;
- case ISD::UMIN: CC = ISD::SETULT; break;
- }
-
- // FIXME: Should really try to split the vector in case it's legal on a
- // subvector.
- if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
- return DAG.UnrollVectorOp(Node);
-
- SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
- return DAG.getSelect(DL, VT, Cond, Op0, Op1);
-}
-
+// Convert redundant addressing modes (e.g. scaling is redundant
+// when accessing bytes).
+ISD::MemIndexType
+TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
+ SDValue Offsets) const {
+ bool IsScaledIndex =
+ (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::UNSIGNED_SCALED);
+ bool IsSignedIndex =
+ (IndexType == ISD::SIGNED_SCALED) || (IndexType == ISD::SIGNED_UNSCALED);
+
+ // Scaling is unimportant for bytes, canonicalize to unscaled.
+ if (IsScaledIndex && MemVT.getScalarType() == MVT::i8) {
+ IsScaledIndex = false;
+ IndexType = IsSignedIndex ? ISD::SIGNED_UNSCALED : ISD::UNSIGNED_UNSCALED;
+ }
+
+ return IndexType;
+}
+
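
The canonicalization above relies on scaled and unscaled indices agreeing when the element size is one byte. A tiny check of that assumption, using the usual base + index * scale addressing formula:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uintptr_t base = 0x2000;
    const uint64_t index = 5;
    const unsigned elt_bytes = 1;                  // MVT::i8
    uintptr_t scaled   = base + index * elt_bytes; // *_SCALED
    uintptr_t unscaled = base + index;             // *_UNSCALED
    assert(scaled == unscaled);                    // canonicalizing is safe
  }
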
+SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ EVT VT = Op0.getValueType();
+ unsigned Opcode = Node->getOpcode();
+ SDLoc DL(Node);
+
+ // umin(x,y) -> sub(x,usubsat(x,y))
+ if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) &&
+ isOperationLegal(ISD::USUBSAT, VT)) {
+ return DAG.getNode(ISD::SUB, DL, VT, Op0,
+ DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1));
+ }
+
+ // umax(x,y) -> add(x,usubsat(y,x))
+ if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) &&
+ isOperationLegal(ISD::USUBSAT, VT)) {
+ return DAG.getNode(ISD::ADD, DL, VT, Op0,
+ DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0));
+ }
+
+ // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B
+ ISD::CondCode CC;
+ switch (Opcode) {
+ default: llvm_unreachable("How did we get here?");
+ case ISD::SMAX: CC = ISD::SETGT; break;
+ case ISD::SMIN: CC = ISD::SETLT; break;
+ case ISD::UMAX: CC = ISD::SETUGT; break;
+ case ISD::UMIN: CC = ISD::SETULT; break;
+ }
+
+ // FIXME: Should really try to split the vector in case it's legal on a
+ // subvector.
+ if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return DAG.UnrollVectorOp(Node);
+
+ SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
+ return DAG.getSelect(DL, VT, Cond, Op0, Op1);
+}
+
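
The two rewrites in expandIntMINMAX rest on the identities umin(x,y) == x - usubsat(x,y) and umax(x,y) == x + usubsat(y,x). A standalone sketch that exhaustively verifies both on uint8_t (reference usubsat written inline; not the LLVM code):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  static uint8_t usubsat(uint8_t a, uint8_t b) { return a > b ? a - b : 0; }

  int main() {
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y) {
        uint8_t a = uint8_t(x), b = uint8_t(y);
        assert(uint8_t(a - usubsat(a, b)) == std::min(a, b)); // umin rewrite
        assert(uint8_t(a + usubsat(b, a)) == std::max(a, b)); // umax rewrite
      }
  }
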
SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
unsigned Opcode = Node->getOpcode();
SDValue LHS = Node->getOperand(0);
@@ -7708,13 +7708,13 @@ SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
assert(VT.isInteger() && "Expected operands to be integers");
// usub.sat(a, b) -> umax(a, b) - b
- if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) {
+ if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) {
SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS);
return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
}
- // uadd.sat(a, b) -> umin(a, ~b) + b
- if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) {
+ // uadd.sat(a, b) -> umin(a, ~b) + b
+ if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) {
SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
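
These two saturating rewrites use umax/umin as the clamp: usub.sat(a,b) == umax(a,b) - b and uadd.sat(a,b) == umin(a,~b) + b. A sketch exhaustively checking both identities against reference saturating arithmetic on uint8_t:

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  int main() {
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y) {
        uint8_t a = uint8_t(x), b = uint8_t(y);
        uint8_t usub_sat = a > b ? a - b : 0;                     // reference
        uint8_t uadd_sat = (a + b > 255) ? 255 : uint8_t(a + b);  // reference
        assert(uint8_t(std::max(a, b) - b) == usub_sat);
        assert(uint8_t(std::min<uint8_t>(a, uint8_t(~b)) + b) == uadd_sat);
      }
  }
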
@@ -7739,11 +7739,11 @@ SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
"addition or subtraction node.");
}
- // FIXME: Should really try to split the vector in case it's legal on a
- // subvector.
- if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
- return DAG.UnrollVectorOp(Node);
-
+ // FIXME: Should really try to split the vector in case it's legal on a
+ // subvector.
+ if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return DAG.UnrollVectorOp(Node);
+
unsigned BitWidth = LHS.getScalarValueSizeInBits();
EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT),
@@ -7783,41 +7783,41 @@ SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
}
}
-SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
- unsigned Opcode = Node->getOpcode();
- bool IsSigned = Opcode == ISD::SSHLSAT;
- SDValue LHS = Node->getOperand(0);
- SDValue RHS = Node->getOperand(1);
- EVT VT = LHS.getValueType();
- SDLoc dl(Node);
-
- assert((Node->getOpcode() == ISD::SSHLSAT ||
- Node->getOpcode() == ISD::USHLSAT) &&
- "Expected a SHLSAT opcode");
- assert(VT == RHS.getValueType() && "Expected operands to be the same type");
- assert(VT.isInteger() && "Expected operands to be integers");
-
- // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.
-
- unsigned BW = VT.getScalarSizeInBits();
- SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
- SDValue Orig =
- DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);
-
- SDValue SatVal;
- if (IsSigned) {
- SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
- SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
- SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
- SatMin, SatMax, ISD::SETLT);
- } else {
- SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
- }
- Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);
-
- return Result;
-}
-
+SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
+ unsigned Opcode = Node->getOpcode();
+ bool IsSigned = Opcode == ISD::SSHLSAT;
+ SDValue LHS = Node->getOperand(0);
+ SDValue RHS = Node->getOperand(1);
+ EVT VT = LHS.getValueType();
+ SDLoc dl(Node);
+
+ assert((Node->getOpcode() == ISD::SSHLSAT ||
+ Node->getOpcode() == ISD::USHLSAT) &&
+ "Expected a SHLSAT opcode");
+ assert(VT == RHS.getValueType() && "Expected operands to be the same type");
+ assert(VT.isInteger() && "Expected operands to be integers");
+
+ // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.
+
+ unsigned BW = VT.getScalarSizeInBits();
+ SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
+ SDValue Orig =
+ DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);
+
+ SDValue SatVal;
+ if (IsSigned) {
+ SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
+ SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
+ SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
+ SatMin, SatMax, ISD::SETLT);
+ } else {
+ SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
+ }
+ Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);
+
+ return Result;
+}
+
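
expandShlSat detects overflow by shifting back and comparing: if (LHS << RHS) >> RHS != LHS, bits were lost and the result saturates. A minimal sketch of the unsigned case on uint8_t (assumes the shift amount is less than the bit width; illustrative names):

  #include <cassert>
  #include <cstdint>

  // Hypothetical sketch of the ushlsat expansion: shift, shift back with a
  // logical right shift, and saturate to the maximum when they disagree.
  static uint8_t ushlsat8(uint8_t x, unsigned s) {
    uint8_t shifted = uint8_t(x << s);
    uint8_t orig = uint8_t(shifted >> s);
    return orig == x ? shifted : 0xFF; // saturate on overflow
  }

  int main() {
    assert(ushlsat8(0x03, 2) == 0x0C); // exact: no saturation
    assert(ushlsat8(0x81, 1) == 0xFF); // top bit lost: saturate
  }

The signed variant differs only in using an arithmetic shift back and selecting INT_MIN or INT_MAX by the sign of LHS, as the select on SETLT above does.
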
SDValue
TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
assert((Node->getOpcode() == ISD::SMULFIX ||
@@ -8191,7 +8191,7 @@ bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
if (isSigned) {
// The high part is obtained by SRA'ing all but one of the bits of low
// part.
- unsigned LoSize = VT.getFixedSizeInBits();
+ unsigned LoSize = VT.getFixedSizeInBits();
HiLHS =
DAG.getNode(ISD::SRA, dl, VT, LHS,
DAG.getConstant(LoSize - 1, dl,
@@ -8250,7 +8250,7 @@ bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
// Truncate the result if SetCC returns a larger type than needed.
EVT RType = Node->getValueType(1);
- if (RType.bitsLT(Overflow.getValueType()))
+ if (RType.bitsLT(Overflow.getValueType()))
Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);
assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
@@ -8260,14 +8260,14 @@ bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
SDLoc dl(Node);
- unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
+ unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
SDValue Op = Node->getOperand(0);
EVT VT = Op.getValueType();
- if (VT.isScalableVector())
- report_fatal_error(
- "Expanding reductions for scalable vectors is undefined.");
-
+ if (VT.isScalableVector())
+ report_fatal_error(
+ "Expanding reductions for scalable vectors is undefined.");
+
// Try to use a shuffle reduction for power of two vectors.
if (VT.isPow2VectorType()) {
while (VT.getVectorNumElements() > 1) {
@@ -8298,33 +8298,33 @@ SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
return Res;
}
-SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const {
- SDLoc dl(Node);
- SDValue AccOp = Node->getOperand(0);
- SDValue VecOp = Node->getOperand(1);
- SDNodeFlags Flags = Node->getFlags();
-
- EVT VT = VecOp.getValueType();
- EVT EltVT = VT.getVectorElementType();
-
- if (VT.isScalableVector())
- report_fatal_error(
- "Expanding reductions for scalable vectors is undefined.");
-
- unsigned NumElts = VT.getVectorNumElements();
-
- SmallVector<SDValue, 8> Ops;
- DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);
-
- unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
-
- SDValue Res = AccOp;
- for (unsigned i = 0; i < NumElts; i++)
- Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
-
- return Res;
-}
-
+SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const {
+ SDLoc dl(Node);
+ SDValue AccOp = Node->getOperand(0);
+ SDValue VecOp = Node->getOperand(1);
+ SDNodeFlags Flags = Node->getFlags();
+
+ EVT VT = VecOp.getValueType();
+ EVT EltVT = VT.getVectorElementType();
+
+ if (VT.isScalableVector())
+ report_fatal_error(
+ "Expanding reductions for scalable vectors is undefined.");
+
+ unsigned NumElts = VT.getVectorNumElements();
+
+ SmallVector<SDValue, 8> Ops;
+ DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);
+
+ unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
+
+ SDValue Res = AccOp;
+ for (unsigned i = 0; i < NumElts; i++)
+ Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
+
+ return Res;
+}
+
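
expandVecReduceSeq folds the scalar accumulator with each extracted element strictly in lane order, which is what ordering-sensitive (e.g. strict floating-point) reductions require. A short sketch of that semantics (plain C++, not the DAG code):

  #include <cstdio>

  // Hypothetical sketch: Res = op(Res, Ops[i]) applied left to right,
  // starting from the explicit accumulator operand.
  float vecreduce_seq_fadd(float acc, const float *v, unsigned n) {
    for (unsigned i = 0; i < n; ++i)
      acc = acc + v[i];
    return acc;
  }

  int main() {
    float v[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    std::printf("%g\n", vecreduce_seq_fadd(10.0f, v, 4)); // 20
  }
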
bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
SelectionDAG &DAG) const {
EVT VT = Node->getValueType(0);
@@ -8347,105 +8347,105 @@ bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
}
return false;
}
-
-SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
- SelectionDAG &DAG) const {
- bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
- SDLoc dl(SDValue(Node, 0));
- SDValue Src = Node->getOperand(0);
-
- // DstVT is the result type, while SatVT is the size to which we saturate
- EVT SrcVT = Src.getValueType();
- EVT DstVT = Node->getValueType(0);
-
- unsigned SatWidth = Node->getConstantOperandVal(1);
- unsigned DstWidth = DstVT.getScalarSizeInBits();
- assert(SatWidth <= DstWidth &&
- "Expected saturation width smaller than result width");
-
- // Determine minimum and maximum integer values and their corresponding
- // floating-point values.
- APInt MinInt, MaxInt;
- if (IsSigned) {
- MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
- MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
- } else {
- MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
- MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
- }
-
- // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
- // libcall emission cannot handle this. Large result types will fail.
- if (SrcVT == MVT::f16) {
- Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
- SrcVT = Src.getValueType();
- }
-
- APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
- APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
-
- APFloat::opStatus MinStatus =
- MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
- APFloat::opStatus MaxStatus =
- MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
- bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
- !(MaxStatus & APFloat::opStatus::opInexact);
-
- SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
- SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
-
- // If the integer bounds are exactly representable as floats and min/max are
- // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
- // of comparisons and selects.
- bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
- isOperationLegal(ISD::FMAXNUM, SrcVT);
- if (AreExactFloatBounds && MinMaxLegal) {
- SDValue Clamped = Src;
-
- // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
- Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
- // Clamp by MaxFloat from above. NaN cannot occur.
- Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
- // Convert clamped value to integer.
- SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
- dl, DstVT, Clamped);
-
- // In the unsigned case we're done, because we mapped NaN to MinFloat,
- // which will cast to zero.
- if (!IsSigned)
- return FpToInt;
-
- // Otherwise, select 0 if Src is NaN.
- SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
- return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt,
- ISD::CondCode::SETUO);
- }
-
- SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
- SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
-
- // Result of direct conversion. The assumption here is that the operation is
- // non-trapping and it's fine to apply it to an out-of-range value if we
- // select it away later.
- SDValue FpToInt =
- DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);
-
- SDValue Select = FpToInt;
-
- // If Src ULT MinFloat, select MinInt. In particular, this also selects
- // MinInt if Src is NaN.
- Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select,
- ISD::CondCode::SETULT);
- // If Src OGT MaxFloat, select MaxInt.
- Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select,
- ISD::CondCode::SETOGT);
-
- // In the unsigned case we are done, because we mapped NaN to MinInt, which
- // is already zero.
- if (!IsSigned)
- return Select;
-
- // Otherwise, select 0 if Src is NaN.
- SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
- return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
-}
+
+SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
+ SelectionDAG &DAG) const {
+ bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
+ SDLoc dl(SDValue(Node, 0));
+ SDValue Src = Node->getOperand(0);
+
+ // DstVT is the result type, while SatVT is the size to which we saturate
+ EVT SrcVT = Src.getValueType();
+ EVT DstVT = Node->getValueType(0);
+
+ unsigned SatWidth = Node->getConstantOperandVal(1);
+ unsigned DstWidth = DstVT.getScalarSizeInBits();
+ assert(SatWidth <= DstWidth &&
+ "Expected saturation width smaller than result width");
+
+ // Determine minimum and maximum integer values and their corresponding
+ // floating-point values.
+ APInt MinInt, MaxInt;
+ if (IsSigned) {
+ MinInt = APInt::getSignedMinValue(SatWidth).sextOrSelf(DstWidth);
+ MaxInt = APInt::getSignedMaxValue(SatWidth).sextOrSelf(DstWidth);
+ } else {
+ MinInt = APInt::getMinValue(SatWidth).zextOrSelf(DstWidth);
+ MaxInt = APInt::getMaxValue(SatWidth).zextOrSelf(DstWidth);
+ }
+
+ // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
+ // libcall emission cannot handle this. Large result types will fail.
+ if (SrcVT == MVT::f16) {
+ Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
+ SrcVT = Src.getValueType();
+ }
+
+ APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
+ APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
+
+ APFloat::opStatus MinStatus =
+ MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
+ APFloat::opStatus MaxStatus =
+ MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
+ bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
+ !(MaxStatus & APFloat::opStatus::opInexact);
+
+ SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
+ SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
+
+ // If the integer bounds are exactly representable as floats and min/max are
+ // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
+ // of comparisons and selects.
+ bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
+ isOperationLegal(ISD::FMAXNUM, SrcVT);
+ if (AreExactFloatBounds && MinMaxLegal) {
+ SDValue Clamped = Src;
+
+ // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
+ Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
+ // Clamp by MaxFloat from above. NaN cannot occur.
+ Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
+ // Convert clamped value to integer.
+ SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
+ dl, DstVT, Clamped);
+
+ // In the unsigned case we're done, because we mapped NaN to MinFloat,
+ // which will cast to zero.
+ if (!IsSigned)
+ return FpToInt;
+
+ // Otherwise, select 0 if Src is NaN.
+ SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
+ return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt,
+ ISD::CondCode::SETUO);
+ }
+
+ SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
+ SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
+
+ // Result of direct conversion. The assumption here is that the operation is
+ // non-trapping and it's fine to apply it to an out-of-range value if we
+ // select it away later.
+ SDValue FpToInt =
+ DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);
+
+ SDValue Select = FpToInt;
+
+ // If Src ULT MinFloat, select MinInt. In particular, this also selects
+ // MinInt if Src is NaN.
+ Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select,
+ ISD::CondCode::SETULT);
+ // If Src OGT MaxFloat, select MaxInt.
+ Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select,
+ ISD::CondCode::SETOGT);
+
+ // In the unsigned case we are done, because we mapped NaN to MinInt, which
+ // is already zero.
+ if (!IsSigned)
+ return Select;
+
+ // Otherwise, select 0 if Src is NaN.
+ SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
+ return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
+}
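
The select-based path of expandFP_TO_INT_SAT clamps out-of-range inputs to MinInt/MaxInt and maps NaN to zero in the signed case. A hedged scalar sketch of that behavior for f32 with an 8-bit saturation width (illustrative helper, not the LLVM code):

  #include <cmath>
  #include <cstdint>
  #include <cstdio>

  // Hypothetical sketch: saturating f32 -> signed conversion, SatWidth = 8,
  // so the result clamps to [-128, 127]; NaN selects zero.
  int32_t fptosi_sat8(float x) {
    if (std::isnan(x)) return 0;   // Src UO Src -> ZeroInt
    if (x < -128.0f) return -128;  // Src ULT MinFloat -> MinInt
    if (x > 127.0f) return 127;    // Src OGT MaxFloat -> MaxInt
    return int32_t(x);             // in range: direct conversion
  }

  int main() {
    std::printf("%d %d %d %d\n", fptosi_sat8(-1000.0f), fptosi_sat8(3.7f),
                fptosi_sat8(500.0f), fptosi_sat8(NAN)); // -128 3 127 0
  }
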
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ya.make b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ya.make
index 9fa4f90d2f..147d550458 100644
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ya.make
+++ b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/ya.make
@@ -12,15 +12,15 @@ LICENSE(Apache-2.0 WITH LLVM-exception)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
PEERDIR(
- contrib/libs/llvm12
- contrib/libs/llvm12/include
- contrib/libs/llvm12/lib/Analysis
- contrib/libs/llvm12/lib/CodeGen
- contrib/libs/llvm12/lib/IR
- contrib/libs/llvm12/lib/MC
- contrib/libs/llvm12/lib/Support
- contrib/libs/llvm12/lib/Target
- contrib/libs/llvm12/lib/Transforms/Utils
+ contrib/libs/llvm12
+ contrib/libs/llvm12/include
+ contrib/libs/llvm12/lib/Analysis
+ contrib/libs/llvm12/lib/CodeGen
+ contrib/libs/llvm12/lib/IR
+ contrib/libs/llvm12/lib/MC
+ contrib/libs/llvm12/lib/Support
+ contrib/libs/llvm12/lib/Target
+ contrib/libs/llvm12/lib/Transforms/Utils
)
ADDINCL(