#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SelectionDAG class, and transitively defines the
// SDNode class and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_SELECTIONDAG_H
#define LLVM_CODEGEN_SELECTIONDAG_H
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
namespace llvm {
class AAResults;
class BlockAddress;
class BlockFrequencyInfo;
class Constant;
class ConstantFP;
class ConstantInt;
class DataLayout;
struct fltSemantics;
class FunctionLoweringInfo;
class GlobalValue;
struct KnownBits;
class LegacyDivergenceAnalysis;
class LLVMContext;
class MachineBasicBlock;
class MachineConstantPoolValue;
class MCSymbol;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class SDDbgValue;
class SDDbgOperand;
class SDDbgLabel;
class SelectionDAG;
class SelectionDAGTargetInfo;
class TargetLibraryInfo;
class TargetLowering;
class TargetMachine;
class TargetSubtargetInfo;
class Value;
class SDVTListNode : public FoldingSetNode {
friend struct FoldingSetTrait<SDVTListNode>;
/// A reference to an Interned FoldingSetNodeID for this node.
/// The Allocator in SelectionDAG holds the data.
/// SDVTList contains all types which are frequently accessed in SelectionDAG.
/// The size of this list is not expected to be big so it won't introduce
/// a memory penalty.
FoldingSetNodeIDRef FastID;
const EVT *VTs;
unsigned int NumVTs;
/// The hash value for SDVTList is fixed, so cache it to avoid
/// hash calculation.
unsigned HashValue;
public:
SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
FastID(ID), VTs(VT), NumVTs(Num) {
HashValue = ID.ComputeHash();
}
SDVTList getSDVTList() {
SDVTList result = {VTs, NumVTs};
return result;
}
};
/// Specialize FoldingSetTrait for SDVTListNode
/// to avoid computing temp FoldingSetNodeID and hash value.
template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
ID = X.FastID;
}
static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) {
if (X.HashValue != IDHash)
return false;
return ID == X.FastID;
}
static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
return X.HashValue;
}
};
template <> struct ilist_alloc_traits<SDNode> {
static void deleteNode(SDNode *) {
llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
}
};
/// Keeps track of dbg_value information through SDISel. We do
/// not build SDNodes for these so as not to perturb the generated code;
/// instead the info is kept off to the side in this structure. Each SDNode may
/// have one or more associated dbg_value entries. This information is kept in
/// DbgValMap.
/// Byval parameters are handled separately because they don't use allocas,
/// which busts the normal mechanism. There is good reason for handling all
/// parameters separately: they may not have code generated for them, they
/// should always go at the beginning of the function regardless of other code
/// motion, and debug info for them is potentially useful even if the parameter
/// is unused. Right now only byval parameters are handled separately.
class SDDbgInfo {
BumpPtrAllocator Alloc;
SmallVector<SDDbgValue*, 32> DbgValues;
SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
SmallVector<SDDbgLabel*, 4> DbgLabels;
using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
DbgValMapType DbgValMap;
public:
SDDbgInfo() = default;
SDDbgInfo(const SDDbgInfo &) = delete;
SDDbgInfo &operator=(const SDDbgInfo &) = delete;
void add(SDDbgValue *V, bool isParameter);
void add(SDDbgLabel *L) { DbgLabels.push_back(L); }
/// Invalidate all DbgValues attached to the node and remove
/// it from the Node-to-DbgValues map.
void erase(const SDNode *Node);
void clear() {
DbgValMap.clear();
DbgValues.clear();
ByvalParmDbgValues.clear();
DbgLabels.clear();
Alloc.Reset();
}
BumpPtrAllocator &getAlloc() { return Alloc; }
bool empty() const {
return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
}
ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
auto I = DbgValMap.find(Node);
if (I != DbgValMap.end())
return I->second;
return ArrayRef<SDDbgValue*>();
}
using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
DbgIterator DbgBegin() { return DbgValues.begin(); }
DbgIterator DbgEnd() { return DbgValues.end(); }
DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
};
void checkForCycles(const SelectionDAG *DAG, bool force = false);
/// This is used to represent a portion of an LLVM function in a low-level
/// Data Dependence DAG representation suitable for instruction selection.
/// This DAG is constructed as the first step of instruction selection in order
/// to allow implementation of machine specific optimizations
/// and code simplifications.
///
/// The representation used by the SelectionDAG is a target-independent
/// representation, which has some similarities to the GCC RTL representation,
/// but is significantly more simple, powerful, and is a graph form instead of a
/// linear form.
///
class SelectionDAG {
const TargetMachine &TM;
const SelectionDAGTargetInfo *TSI = nullptr;
const TargetLowering *TLI = nullptr;
const TargetLibraryInfo *LibInfo = nullptr;
MachineFunction *MF;
Pass *SDAGISelPass = nullptr;
LLVMContext *Context;
CodeGenOpt::Level OptLevel;
LegacyDivergenceAnalysis * DA = nullptr;
FunctionLoweringInfo * FLI = nullptr;
/// The function-level optimization remark emitter. Used to emit remarks
/// whenever manipulating the DAG.
OptimizationRemarkEmitter *ORE;
ProfileSummaryInfo *PSI = nullptr;
BlockFrequencyInfo *BFI = nullptr;
/// The starting token.
SDNode EntryNode;
/// The root of the entire DAG.
SDValue Root;
/// A linked list of nodes in the current DAG.
ilist<SDNode> AllNodes;
/// The AllocatorType for allocating SDNodes. We use
/// pool allocation with recycling.
using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
sizeof(LargestSDNode),
alignof(MostAlignedSDNode)>;
/// Pool allocation for nodes.
NodeAllocatorType NodeAllocator;
/// This structure is used to memoize nodes, automatically performing
/// CSE with existing nodes when a duplicate is requested.
FoldingSet<SDNode> CSEMap;
/// Pool allocation for machine-opcode SDNode operands.
BumpPtrAllocator OperandAllocator;
ArrayRecycler<SDUse> OperandRecycler;
/// Pool allocation for misc. objects that are created once per SelectionDAG.
BumpPtrAllocator Allocator;
/// Tracks dbg_value and dbg_label information through SDISel.
SDDbgInfo *DbgInfo;
using CallSiteInfo = MachineFunction::CallSiteInfo;
using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
struct CallSiteDbgInfo {
CallSiteInfo CSInfo;
MDNode *HeapAllocSite = nullptr;
bool NoMerge = false;
};
DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
uint16_t NextPersistentId = 0;
/// Are instruction-referencing variable locations desired for this function?
bool UseInstrRefDebugInfo = false;
public:
/// Clients of various APIs that cause global effects on
/// the DAG can optionally implement this interface. This allows the clients
/// to handle the various sorts of updates that happen.
///
/// A DAGUpdateListener automatically registers itself with DAG when it is
/// constructed, and removes itself when destroyed in RAII fashion.
struct DAGUpdateListener {
DAGUpdateListener *const Next;
SelectionDAG &DAG;
explicit DAGUpdateListener(SelectionDAG &D)
: Next(D.UpdateListeners), DAG(D) {
DAG.UpdateListeners = this;
}
virtual ~DAGUpdateListener() {
assert(DAG.UpdateListeners == this &&
"DAGUpdateListeners must be destroyed in LIFO order");
DAG.UpdateListeners = Next;
}
/// The node N that was deleted and, if E is not null, an
/// equivalent node E that replaced it.
virtual void NodeDeleted(SDNode *N, SDNode *E);
/// The node N that was updated.
virtual void NodeUpdated(SDNode *N);
/// The node N that was inserted.
virtual void NodeInserted(SDNode *N);
};
struct DAGNodeDeletedListener : public DAGUpdateListener {
std::function<void(SDNode *, SDNode *)> Callback;
DAGNodeDeletedListener(SelectionDAG &DAG,
std::function<void(SDNode *, SDNode *)> Callback)
: DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
private:
virtual void anchor();
};
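// Illustrative usage sketch (not part of the upstream header): a
// DAGNodeDeletedListener registers itself via RAII for the lifetime of a
// scope, assuming a SelectionDAG &DAG is in scope.
//
//   {
//     SelectionDAG::DAGNodeDeletedListener DNL(
//         DAG, [](SDNode *N, SDNode *E) {
//           // N was deleted; E (possibly null) is the node that replaced it.
//         });
//     // ... run transformations that may delete nodes ...
//   } // The listener unregisters itself here.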
/// Helps insert SDNodeFlags automatically during transformations, using
/// RAII to save and restore the flags of the current scope.
class FlagInserter {
SelectionDAG &DAG;
SDNodeFlags Flags;
FlagInserter *LastInserter;
public:
FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
: DAG(SDAG), Flags(Flags),
LastInserter(SDAG.getFlagInserter()) {
SDAG.setFlagInserter(this);
}
FlagInserter(SelectionDAG &SDAG, SDNode *N)
: FlagInserter(SDAG, N->getFlags()) {}
FlagInserter(const FlagInserter &) = delete;
FlagInserter &operator=(const FlagInserter &) = delete;
~FlagInserter() { DAG.setFlagInserter(LastInserter); }
SDNodeFlags getFlags() const { return Flags; }
};
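// Illustrative sketch (not part of the upstream header): while a FlagInserter
// is alive, getNode() calls that take their flags from the current inserter
// pick up these flags automatically. Assumes SelectionDAG &DAG, SDLoc DL and
// SDValues X, Y.
//
//   SDNodeFlags Flags;
//   Flags.setNoSignedWrap(true);
//   SelectionDAG::FlagInserter FI(DAG, Flags);
//   SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i32, X, Y); // nsw add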
/// When true, additional steps are taken to
/// ensure that getConstant() and similar functions return DAG nodes that
/// have legal types. This is important after type legalization since
/// any illegally typed nodes generated after this point will not experience
/// type legalization.
bool NewNodesMustHaveLegalTypes = false;
private:
/// DAGUpdateListener is a friend so it can manipulate the listener stack.
friend struct DAGUpdateListener;
/// Linked list of registered DAGUpdateListener instances.
/// This stack is maintained by DAGUpdateListener RAII.
DAGUpdateListener *UpdateListeners = nullptr;
/// Implementation of setSubgraphColor.
/// Return whether we had to truncate the search.
bool setSubgraphColorHelper(SDNode *N, const char *Color,
DenseSet<SDNode *> &visited,
int level, bool &printed);
template <typename SDNodeT, typename... ArgTypes>
SDNodeT *newSDNode(ArgTypes &&... Args) {
return new (NodeAllocator.template Allocate<SDNodeT>())
SDNodeT(std::forward<ArgTypes>(Args)...);
}
/// Build a synthetic SDNodeT with the given args and extract its subclass
/// data as an integer (e.g. for use in a folding set).
///
/// The args to this function are the same as the args to SDNodeT's
/// constructor, except the second arg (assumed to be a const DebugLoc&) is
/// omitted.
template <typename SDNodeT, typename... ArgTypes>
static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
ArgTypes &&... Args) {
// The compiler can reduce this expression to a constant iff we pass an
// empty DebugLoc. Thankfully, the debug location doesn't have any bearing
// on the subclass data.
return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
.getRawSubclassData();
}
template <typename SDNodeTy>
static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
SDVTList VTs, EVT MemoryVT,
MachineMemOperand *MMO) {
return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
.getRawSubclassData();
}
void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
void removeOperands(SDNode *Node) {
if (!Node->OperandList)
return;
OperandRecycler.deallocate(
ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
Node->OperandList);
Node->NumOperands = 0;
Node->OperandList = nullptr;
}
void CreateTopologicalOrder(std::vector<SDNode*>& Order);
public:
// Maximum depth for recursive analysis such as computeKnownBits, etc.
static constexpr unsigned MaxRecursionDepth = 6;
explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
SelectionDAG(const SelectionDAG &) = delete;
SelectionDAG &operator=(const SelectionDAG &) = delete;
~SelectionDAG();
/// Prepare this SelectionDAG to process code in the given MachineFunction.
void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
LegacyDivergenceAnalysis * Divergence,
ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
FLI = FuncInfo;
}
/// Clear state and free memory necessary to make this
/// SelectionDAG ready to process a new block.
void clear();
MachineFunction &getMachineFunction() const { return *MF; }
const Pass *getPass() const { return SDAGISelPass; }
const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
const TargetMachine &getTarget() const { return TM; }
const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
LLVMContext *getContext() const { return Context; }
OptimizationRemarkEmitter &getORE() const { return *ORE; }
ProfileSummaryInfo *getPSI() const { return PSI; }
BlockFrequencyInfo *getBFI() const { return BFI; }
FlagInserter *getFlagInserter() { return Inserter; }
void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
/// Just dump a dot graph to a user-provided path with the given title.
/// This doesn't open the dot viewer program, which helps with visualization
/// outside of a debugging session.
/// FileName expects an absolute path. If it is provided without any path
/// separators, the file will be created in the current directory.
/// An error will be emitted if the path is invalid.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dumpDotGraph(const Twine &FileName, const Twine &Title);
#endif
/// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
void viewGraph(const std::string &Title);
void viewGraph();
#ifndef NDEBUG
std::map<const SDNode *, std::string> NodeGraphAttrs;
#endif
/// Clear all previously defined node graph attributes.
/// Intended to be used from a debugging tool (eg. gdb).
void clearGraphAttrs();
/// Set graph attributes for a node. (eg. "color=red".)
void setGraphAttrs(const SDNode *N, const char *Attrs);
/// Get graph attributes for a node. (eg. "color=red".)
/// Used from getNodeAttributes.
std::string getGraphAttrs(const SDNode *N) const;
/// Convenience for setting node color attribute.
void setGraphColor(const SDNode *N, const char *Color);
/// Convenience for setting subgraph color attribute.
void setSubgraphColor(SDNode *N, const char *Color);
using allnodes_const_iterator = ilist<SDNode>::const_iterator;
allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
using allnodes_iterator = ilist<SDNode>::iterator;
allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
allnodes_iterator allnodes_end() { return AllNodes.end(); }
ilist<SDNode>::size_type allnodes_size() const {
return AllNodes.size();
}
iterator_range<allnodes_iterator> allnodes() {
return make_range(allnodes_begin(), allnodes_end());
}
iterator_range<allnodes_const_iterator> allnodes() const {
return make_range(allnodes_begin(), allnodes_end());
}
/// Return the root tag of the SelectionDAG.
const SDValue &getRoot() const { return Root; }
/// Return the token chain corresponding to the entry of the function.
SDValue getEntryNode() const {
return SDValue(const_cast<SDNode *>(&EntryNode), 0);
}
/// Set the current root tag of the SelectionDAG.
///
const SDValue &setRoot(SDValue N) {
assert((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!");
if (N.getNode())
checkForCycles(N.getNode(), this);
Root = N;
if (N.getNode())
checkForCycles(this);
return Root;
}
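// Illustrative sketch (not part of the upstream header): merging two chains
// and making the result the new DAG root, assuming SelectionDAG &DAG, SDLoc DL
// and chain values Ch1, Ch2 of type MVT::Other.
//
//   SDValue NewRoot =
//       DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch1, Ch2);
//   DAG.setRoot(NewRoot);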
#ifndef NDEBUG
void VerifyDAGDivergence();
#endif
/// This iterates over the nodes in the SelectionDAG, folding
/// certain types of nodes together, or eliminating superfluous nodes. The
/// Level argument controls whether Combine is allowed to produce nodes and
/// types that are illegal on the target.
void Combine(CombineLevel Level, AAResults *AA,
CodeGenOpt::Level OptLevel);
/// This transforms the SelectionDAG into a SelectionDAG that
/// only uses types natively supported by the target.
/// Returns "true" if it made any changes.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
bool LegalizeTypes();
/// This transforms the SelectionDAG into a SelectionDAG that is
/// compatible with the target instruction selector, as indicated by the
/// TargetLowering object.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
void Legalize();
/// Transforms a SelectionDAG node and any operands to it into a node
/// that is compatible with the target instruction selector, as indicated by
/// the TargetLowering object.
///
/// \returns true if \c N is a valid, legal node after calling this.
///
/// This essentially runs a single recursive walk of the \c Legalize process
/// over the given node (and its operands). This can be used to incrementally
/// legalize the DAG. All of the nodes which are directly replaced,
/// potentially including N, are added to the output parameter \c
/// UpdatedNodes so that the delta to the DAG can be understood by the
/// caller.
///
/// When this returns false, N has been legalized in a way that make the
/// pointer passed in no longer valid. It may have even been deleted from the
/// DAG, and so it shouldn't be used further. When this returns true, the
/// N passed in is a legal node, and can be immediately processed as such.
/// This may still have done some work on the DAG, and will still populate
/// UpdatedNodes with any new nodes replacing those originally in the DAG.
bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
/// This transforms the SelectionDAG into a SelectionDAG
/// that only uses vector math operations supported by the target. This is
/// necessary as a separate step from Legalize because unrolling a vector
/// operation can introduce illegal types, which requires running
/// LegalizeTypes again.
///
/// This returns true if it made any changes; in that case, LegalizeTypes
/// is called again before Legalize.
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
bool LegalizeVectors();
/// This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNodes();
/// Remove the specified node from the system. This node must
/// have no referrers.
void DeleteNode(SDNode *N);
/// Return an SDVTList that represents the list of values specified.
SDVTList getVTList(EVT VT);
SDVTList getVTList(EVT VT1, EVT VT2);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
SDVTList getVTList(ArrayRef<EVT> VTs);
//===--------------------------------------------------------------------===//
// Node creation methods.
/// Create a ConstantSDNode wrapping a constant value.
/// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
///
/// If only legal types can be produced, this does the necessary
/// transformations (e.g., if the vector element type is illegal).
/// @{
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
bool IsOpaque = false) {
return getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()), DL, VT,
IsTarget, IsOpaque);
}
SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
bool isTarget = false, bool isOpaque = false);
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
bool isTarget = false);
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
bool LegalTypes = true);
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
bool isTarget = false);
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
bool isOpaque = false) {
return getConstant(Val, DL, VT, true, isOpaque);
}
/// Create a true or false constant of type \p VT using the target's
/// BooleanContent for type \p OpVT.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
/// @}
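// Illustrative sketch (not part of the upstream header): materializing integer
// constants, assuming SelectionDAG &DAG and SDLoc DL.
//
//   SDValue C42  = DAG.getConstant(42, DL, MVT::i32);       // plain constant
//   SDValue TC   = DAG.getTargetConstant(7, DL, MVT::i32);  // operand of target nodes
//   SDValue Ones = DAG.getAllOnesConstant(DL, MVT::v4i32);  // splatted for vectors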
/// Create a ConstantFPSDNode wrapping a constant value.
/// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
///
/// If only legal types can be produced, this does the necessary
/// transformations (e.g., if the vector element type is illegal).
/// The forms that take a double should only be used for simple constants
/// that can be exactly represented in VT. No checks are made.
/// @{
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
bool isTarget = false);
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
return getConstantFP(Val, DL, VT, true);
}
/// @}
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned TargetFlags = 0);
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
int64_t offset = 0, unsigned TargetFlags = 0) {
return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
}
SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
SDValue getTargetFrameIndex(int FI, EVT VT) {
return getFrameIndex(FI, VT, true);
}
SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
unsigned TargetFlags = 0);
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
return getJumpTable(JTI, VT, true, TargetFlags);
}
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
int Offs = 0, bool isT = false,
unsigned TargetFlags = 0);
SDValue getTargetConstantPool(const Constant *C, EVT VT,
MaybeAlign Align = None, int Offset = 0,
unsigned TargetFlags = 0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
MaybeAlign Align = None, int Offs = 0,
bool isT = false, unsigned TargetFlags = 0);
SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
MaybeAlign Align = None, int Offset = 0,
unsigned TargetFlags = 0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
unsigned TargetFlags = 0);
// When generating a branch to a BB, we don't in general know enough
// to provide debug info for the BB at that time, so keep this one around.
SDValue getBasicBlock(MachineBasicBlock *MBB);
SDValue getExternalSymbol(const char *Sym, EVT VT);
SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
unsigned TargetFlags = 0);
SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
SDValue getRegisterMask(const uint32_t *RegMask);
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
MCSymbol *Label);
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
bool isTarget = false, unsigned TargetFlags = 0);
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
int64_t Offset = 0, unsigned TargetFlags = 0) {
return getBlockAddress(BA, VT, Offset, true, TargetFlags);
}
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
SDValue N) {
return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
getRegister(Reg, N.getValueType()), N);
}
// This version of the getCopyToReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
return getNode(ISD::CopyToReg, dl, VTs,
makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
}
// Similar to last getCopyToReg() except parameter Reg is a SDValue
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Reg, N, Glue };
return getNode(ISD::CopyToReg, dl, VTs,
makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
}
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, getRegister(Reg, VT) };
return getNode(ISD::CopyFromReg, dl, VTs, Ops);
}
// This version of the getCopyFromReg method takes an extra operand, which
// indicates that there is potentially an incoming glue value (if Glue is not
// null) and that there should be a glue result.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
SDValue Glue) {
SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
return getNode(ISD::CopyFromReg, dl, VTs,
makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
}
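// Illustrative sketch (not part of the upstream header): copying a value into
// a virtual register and reading it back, assuming SelectionDAG &DAG, SDLoc DL,
// a chain Chain, an SDValue Val of type MVT::i32 and a virtual register number
// VReg obtained elsewhere (e.g. from MachineRegisterInfo).
//
//   SDValue ToReg    = DAG.getCopyToReg(Chain, DL, VReg, Val);       // chain result
//   SDValue FromReg  = DAG.getCopyFromReg(ToReg, DL, VReg, MVT::i32);
//   SDValue NewChain = FromReg.getValue(1);                          // token chain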
SDValue getCondCode(ISD::CondCode Cond);
/// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
/// which must be a vector type, must match the number of mask elements
/// NumElts. An integer mask element equal to -1 is treated as undefined.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
ArrayRef<int> Mask);
/// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
/// which must be a vector type, must match the number of operands in Ops.
/// The operands must have the same type as (or, for integers, a type wider
/// than) VT's element type.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
/// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
/// which must be a vector type, must match the number of operands in Ops.
/// The operands must have the same type as (or, for integers, a type wider
/// than) VT's element type.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
/// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
/// elements. VT must be a vector type. Op's type must be the same as (or,
/// for integers, a type wider than) VT's element type.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
if (Op.getOpcode() == ISD::UNDEF) {
assert((VT.getVectorElementType() == Op.getValueType() ||
(VT.isInteger() &&
VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
"A splatted value must have a width equal or (for integers) "
"greater than the vector element type!");
return getNode(ISD::UNDEF, SDLoc(), VT);
}
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}
// Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
// elements.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
if (Op.getOpcode() == ISD::UNDEF) {
assert((VT.getVectorElementType() == Op.getValueType() ||
(VT.isInteger() &&
VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
"A splatted value must have a width equal or (for integers) "
"greater than the vector element type!");
return getNode(ISD::UNDEF, SDLoc(), VT);
}
return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
}
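// Illustrative sketch (not part of the upstream header): splatting a scalar
// into a fixed-length vector, assuming SelectionDAG &DAG, SDLoc DL and a
// scalar SDValue Elt of type MVT::i32.
//
//   SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, Elt);
//   // For scalable vectors, getSplatVector(VT, DL, Elt) builds a
//   // SPLAT_VECTOR node instead.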
/// Returns a vector of type ResVT whose elements contain the linear sequence
/// <0, Step, Step * 2, Step * 3, ...>
SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal);
/// Returns a vector of type ResVT whose elements contain the linear sequence
/// <0, 1, 2, 3, ...>
SDValue getStepVector(const SDLoc &DL, EVT ResVT);
/// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
/// the shuffle node in input but with swapped operands.
///
/// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
/// Convert Op, which must be of float type, to the
/// float type VT, by either extending or rounding (by truncation).
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be a STRICT operation of float type, to the
/// float type VT, by either extending or rounding (by truncation).
std::pair<SDValue, SDValue>
getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either any-extending or truncating it.
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either sign-extending or truncating it.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either zero-extending or truncating it.
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Return the expression required to zero extend the Op
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the integer type VT, by
/// either truncating it or performing either zero or sign extension as
/// appropriate extension for the pointer's semantics.
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
/// Return the expression required to extend the Op as a pointer value
/// assuming it was the smaller SrcTy value. This may be either a zero extend
/// or a sign extend.
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
/// Convert Op, which must be of integer type, to the integer type VT,
/// by using an extension appropriate for the target's
/// BooleanContent for type OpVT or truncating it.
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
/// Create a bitwise NOT operation as (XOR Val, -1).
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
/// Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
/// Returns sum of the base pointer and offset.
/// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
const SDNodeFlags Flags = SDNodeFlags());
SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
const SDNodeFlags Flags = SDNodeFlags());
/// Create an add instruction with appropriate flags when used for
/// addressing some offset of an object. i.e. if a load is split into multiple
/// components, create an add nuw from the base pointer to the offset.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
}
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
// The object itself can't wrap around the address space, so it shouldn't be
// possible for the adds of the offsets to the split parts to overflow.
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
}
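// Illustrative sketch (not part of the upstream header): addressing the second
// half of a split 64-bit access, assuming SelectionDAG &DAG, SDLoc DL and a
// base pointer Ptr.
//
//   SDValue HiPtr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::Fixed(4));
//   // The add is tagged nuw because the parts of one object cannot wrap
//   // around the address space.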
/// Return a new CALLSEQ_START node, which starts a new call frame in which
/// InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END sequence and
/// OutSize specifies the part of the frame set up prior to the sequence.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
const SDLoc &DL) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain,
getIntPtrConstant(InSize, DL, true),
getIntPtrConstant(OutSize, DL, true) };
return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
}
/// Return a new CALLSEQ_END node, which always must have a
/// glue result (to ensure it's not CSE'd).
/// CALLSEQ_END does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
SDValue InGlue, const SDLoc &DL) {
SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 4> Ops;
Ops.push_back(Chain);
Ops.push_back(Op1);
Ops.push_back(Op2);
if (InGlue.getNode())
Ops.push_back(InGlue);
return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
}
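// Illustrative sketch (not part of the upstream header): bracketing a lowered
// call with call-sequence markers, assuming SelectionDAG &DAG, SDLoc DL, a
// chain Chain, a byte count NumBytes and an (optional) incoming glue value
// InGlue.
//
//   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
//   // ... emit argument copies and the call itself here ...
//   Chain = DAG.getCALLSEQ_END(Chain,
//                              DAG.getIntPtrConstant(NumBytes, DL, true),
//                              DAG.getIntPtrConstant(0, DL, true),
//                              InGlue, DL);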
/// Return true if the result of this operation is always undefined.
bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
/// Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getUNDEF(EVT VT) {
return getNode(ISD::UNDEF, SDLoc(), VT);
}
/// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT");
return getNode(ISD::VSCALE, DL, VT,
getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
}
/// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
}
/// Gets or creates the specified node.
///
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
// Use flags from current flag inserter.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3);
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
// Specialize again based on number of operands for nodes with a VTList
// rather than a single VT.
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
SDValue N2, SDValue N3, SDValue N4, SDValue N5);
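// Illustrative sketch (not part of the upstream header): the getNode()
// overloads above are the generic way to build (and CSE) nodes, assuming
// SelectionDAG &DAG, SDLoc DL and SDValues X, Y.
//
//   SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i32, X, Y);
//   SDNodeFlags Flags;
//   Flags.setNoUnsignedWrap(true);
//   SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i32, Mul, X, Flags);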
/// Compute a TokenFactor to force all the incoming stack arguments to be
/// loaded from the stack. This is used in tail call lowering to protect
/// stack arguments from being clobbered.
SDValue getStackArgumentTokenFactor(SDValue Chain);
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, Align Alignment, bool isVol,
bool AlwaysInline, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, Align Alignment, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
SDValue Size, Align Alignment, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
const AAMDNodes &AAInfo = AAMDNodes());
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
unsigned DstAlign, SDValue Src, unsigned SrcAlign,
SDValue Size, Type *SizeTy, unsigned ElemSz,
bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
unsigned DstAlign, SDValue Src, unsigned SrcAlign,
SDValue Size, Type *SizeTy, unsigned ElemSz,
bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo);
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
unsigned DstAlign, SDValue Value, SDValue Size,
Type *SizeTy, unsigned ElemSz, bool isTailCall,
MachinePointerInfo DstPtrInfo);
/// Helper function to make it easier to build SetCC's if you just have an
/// ISD::CondCode instead of an SDValue.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
ISD::CondCode Cond, SDValue Chain = SDValue(),
bool IsSignaling = false) {
assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
"Cannot compare scalars to vectors");
assert(LHS.getValueType().isVector() == VT.isVector() &&
"Cannot compare scalars to vectors");
assert(Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.");
if (Chain)
return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
{VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
}
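// Illustrative sketch (not part of the upstream header): comparing two i32
// values, assuming SelectionDAG &DAG, SDLoc DL, SDValues LHS and RHS, and that
// the target's setcc result type has already been queried into SetCCVT.
//
//   SDValue IsLess = DAG.getSetCC(DL, SetCCVT, LHS, RHS, ISD::SETLT);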
/// Helper function to make it easier to build Select's if you just have
/// operands and don't want to check for vector.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
SDValue RHS) {
assert(LHS.getValueType() == RHS.getValueType() &&
"Cannot use select on differing types");
assert(VT.isVector() == LHS.getValueType().isVector() &&
"Cannot mix vectors and scalars");
auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
return getNode(Opcode, DL, VT, Cond, LHS, RHS);
}
/// Helper function to make it easier to build SelectCC's if you just have an
/// ISD::CondCode instead of an SDValue.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
SDValue False, ISD::CondCode Cond) {
return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
False, getCondCode(Cond));
}
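// Illustrative sketch (not part of the upstream header): selecting between two
// values based on a condition or a comparison, assuming SelectionDAG &DAG,
// SDLoc DL and SDValues Cond, A, B, LHS, RHS.
//
//   SDValue Sel   = DAG.getSelect(DL, MVT::i32, Cond, A, B);
//   SDValue SelCC = DAG.getSelectCC(DL, LHS, RHS, A, B, ISD::SETEQ);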
/// Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
/// Try to simplify a shift into 1 of its operands or a constant.
SDValue simplifyShift(SDValue X, SDValue Y);
/// Try to simplify a floating-point binary operation into 1 of its operands
/// or a constant.
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
SDNodeFlags Flags);
/// VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue SV, unsigned Align);
/// Gets a node for an atomic cmpxchg op. There are two
/// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
/// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
/// a success flag (initially i1), and a chain.
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
/// Gets a node for an atomic op, produces result and chain and takes N
/// operands.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTList, ArrayRef<SDValue> Ops,
MachineMemOperand *MMO);
/// Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
SDValue getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore,
uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
inline SDValue getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore,
uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
// Ensure that codegen never sees alignment 0
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
Size, AAInfo);
}
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachineMemOperand *MMO);
/// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
/// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
/// offsets `Offset` and `Offset + Size`.
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
int FrameIndex, int64_t Size, int64_t Offset = -1);
/// Creates a PseudoProbeSDNode with function GUID `Guid` and
/// the index of the block `Index` it is probing, as well as the attributes
/// `attr` of the probe.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
uint64_t Index, uint32_t Attr);
/// Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
/// Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
///
/// This function will set the MOLoad flag on MMOFlags, but you can set it if
/// you want. The MOStore flag must not be set.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo,
MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
/// FIXME: Remove once transition to Align is over.
inline SDValue
getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr) {
return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
AAInfo, Ranges);
}
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO);
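// Illustrative sketch (not part of the upstream header): a simple chained i32
// load, assuming SelectionDAG &DAG, SDLoc DL, a chain Chain and a pointer Ptr
// (MachinePointerInfo() is used here as an unknown memory location).
//
//   SDValue Load  = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo());
//   SDValue Value = Load;               // result 0: the loaded value
//   SDValue OutCh = Load.getValue(1);   // result 1: the output chain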
SDValue
getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes());
/// FIXME: Remove once transition to Align is over.
inline SDValue
getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
MaybeAlign(Alignment), MMOFlags, AAInfo);
}
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
SDValue Chain, SDValue Ptr, EVT MemVT,
MachineMemOperand *MMO);
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
inline SDValue getLoad(
ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
// Ensures that codegen never sees a None Alignment.
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
Ranges);
}
/// FIXME: Remove once transition to Align is over.
inline SDValue
getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr) {
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
}
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// Helper function to build ISD::STORE nodes.
///
/// This function will set the MOStore flag on MMOFlags; callers may also set
/// it themselves. The MOLoad and MOInvariant flags must not be set.
SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, Align Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes());
inline SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getStore(Chain, dl, Val, Ptr, PtrInfo,
Alignment.getValueOr(getEVTAlign(Val.getValueType())),
MMOFlags, AAInfo);
}
/// FIXME: Remove once transition to Align is over.
inline SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
MMOFlags, AAInfo);
}
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
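// Illustrative sketch of a plain store (assumes `DAG`, `dl`, `Chain`, `Val`,
// `Ptr` and `PtrInfo` are in scope); the returned value is the new token
// chain:
//
//   Chain = DAG.getStore(Chain, dl, Val, Ptr, PtrInfo);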
SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes());
inline SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT SVT,
MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
AAInfo);
}
/// FIXME: Remove once transition to Align is over.
inline SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
MaybeAlign(Alignment), MMOFlags, AAInfo);
}
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
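// Illustrative sketch of a truncating store (same assumed names as the store
// sketch above): store only the low 8 bits of an i32 value `Val`.
//
//   Chain = DAG.getTruncStore(Chain, dl, Val, Ptr, PtrInfo, MVT::i8);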
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
EVT MemVT, Align Alignment,
MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
const MDNode *Ranges = nullptr, bool IsExpanding = false);
inline SDValue
getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr, bool IsExpanding = false) {
// Ensures that codegen never sees a None Alignment.
return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL,
PtrInfo, MemVT, Alignment.getValueOr(getEVTAlign(MemVT)),
MMOFlags, AAInfo, Ranges, IsExpanding);
}
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
SDValue Mask, SDValue EVL, EVT MemVT,
MachineMemOperand *MMO, bool IsExpanding = false);
SDValue getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
const AAMDNodes &AAInfo, const MDNode *Ranges = nullptr,
bool IsExpanding = false);
SDValue getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
bool IsExpanding = false);
SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL,
MachinePointerInfo PtrInfo, EVT MemVT,
MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
const AAMDNodes &AAInfo, bool IsExpanding = false);
SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL,
EVT MemVT, MachineMemOperand *MMO,
bool IsExpanding = false);
SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT,
MachineMemOperand *MMO, ISD::MemIndexedMode AM,
bool IsTruncating = false, bool IsCompressing = false);
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, SDValue Mask, SDValue EVL,
MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
MachineMemOperand::Flags MMOFlags,
const AAMDNodes &AAInfo, bool IsCompressing = false);
SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Ptr, SDValue Mask, SDValue EVL, EVT SVT,
MachineMemOperand *MMO, bool IsCompressing = false);
SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType);
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType);
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
MachineMemOperand *MMO, ISD::MemIndexedMode AM,
ISD::LoadExtType, bool IsExpanding = false);
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
SDValue Offset, ISD::MemIndexedMode AM);
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, ISD::MemIndexedMode AM,
bool IsTruncating = false, bool IsCompressing = false);
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM);
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
ISD::MemIndexType IndexType,
bool IsTruncating = false);
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
/// Return an MDNodeSDNode which holds an MDNode.
SDValue getMDNode(const MDNode *MD);
/// Return a bitcast using the SDLoc of the value operand, and casting to the
/// provided type. Use getNode to set a custom SDLoc.
SDValue getBitcast(EVT VT, SDValue V);
/// Return an AddrSpaceCastSDNode.
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
unsigned DestAS);
/// Return a freeze using the SDLoc of the value operand.
SDValue getFreeze(SDValue V);
/// Return an AssertAlignSDNode.
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);
/// Return the specified value casted to
/// the target's desired shift amount type.
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
/// Expand the specified \c ISD::VAARG node as the Legalize pass would.
SDValue expandVAArg(SDNode *Node);
/// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
SDValue expandVACopy(SDNode *Node);
/// Returns a GlobalAddress of the function from the current module whose
/// name matches the given ExternalSymbol. Additionally, can provide the
/// matched function through \p TargetFunction.
/// Panics if the function does not exist.
SDValue getSymbolFunctionGlobalAddress(SDValue Op,
Function **TargetFunction = nullptr);
/// *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
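// Illustrative sketch (assumes an SDNode `*N` with two operands and
// replacement operands `NewLHS`/`NewRHS`). The return value must be used:
// due to CSE this may be a pre-existing node rather than a mutated N.
//
//   SDNode *Updated = DAG.UpdateNodeOperands(N, NewLHS, NewRHS);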
/// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
/// values or more, move values into new TokenFactors in 64k-1 blocks, until
/// the final TokenFactor has less than 64k operands.
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
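// Illustrative sketch (assumes two load nodes `LoadA`/`LoadB` whose value-1
// results are their output chains, and an SDLoc `DL`): merge the chains so
// later nodes depend on both loads.
//
//   SmallVector<SDValue, 8> Chains = {LoadA.getValue(1), LoadB.getValue(1)};
//   SDValue Merged = DAG.getTokenFactor(DL, Chains);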
/// *Mutate* the specified machine node's memory references to the provided
/// list.
void setNodeMemRefs(MachineSDNode *N,
ArrayRef<MachineMemOperand *> NewMemRefs);
// Calculate divergence of node \p N based on its operands.
bool calculateDivergence(SDNode *N);
// Propagates the change in divergence to users.
void updateDivergence(SDNode *N);
/// These are used for target selectors to *mutate* the
/// specified node to have the specified return type, Target opcode, and
/// operands. Note that target opcodes are stored as
/// ~TargetOpcode in the node opcode field. The resultant node is returned.
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
ArrayRef<SDValue> Ops);
/// Mutate the specified strict FP node to its non-strict equivalent,
/// unlinking the node from its chain and dropping the metadata arguments.
/// The node must be a strict FP node.
SDNode *mutateStrictFPToFP(SDNode *Node);
/// These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already
/// a node of the specified opcode and operands, it returns that node instead
/// of the current one.
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
ArrayRef<SDValue> Ops);
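// Illustrative sketch (assumes `DAG`, `dl` and a value type `VT`): create an
// IMPLICIT_DEF machine node and wrap its first result as an SDValue.
//
//   SDValue Undef = SDValue(
//       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);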
/// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
SDValue Operand);
/// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
SDValue Operand, SDValue Subreg);
/// Get the specified node if it's already available, or else return NULL.
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
ArrayRef<SDValue> Ops);
/// Check if a node exists without modifying its flags.
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
/// Creates a SDDbgValue node.
SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
unsigned R, bool IsIndirect, const DebugLoc &DL,
unsigned O);
/// Creates a constant SDDbgValue node.
SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
const Value *C, const DebugLoc &DL,
unsigned O);
/// Creates a FrameIndex SDDbgValue node.
SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
unsigned FI, bool IsIndirect,
const DebugLoc &DL, unsigned O);
/// Creates a FrameIndex SDDbgValue node.
SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
unsigned FI,
ArrayRef<SDNode *> Dependencies,
bool IsIndirect, const DebugLoc &DL,
unsigned O);
/// Creates a VReg SDDbgValue node.
SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
unsigned VReg, bool IsIndirect,
const DebugLoc &DL, unsigned O);
/// Creates a SDDbgValue node from a list of locations.
SDDbgValue *getDbgValueList(DIVariable *Var, DIExpression *Expr,
ArrayRef<SDDbgOperand> Locs,
ArrayRef<SDNode *> Dependencies, bool IsIndirect,
const DebugLoc &DL, unsigned O, bool IsVariadic);
/// Creates a SDDbgLabel node.
SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
/// Transfer debug values from one node to another, while optionally
/// generating fragment expressions for split-up values. If \p InvalidateDbg
/// is set, debug values are invalidated after they are transferred.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
unsigned SizeInBits = 0, bool InvalidateDbg = true);
/// Remove the specified node from the system. If any of its
/// operands then become dead, remove them as well. Inform UpdateListener
/// for each node deleted.
void RemoveDeadNode(SDNode *N);
/// This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
/// Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG. Use the first
/// version if 'From' is known to have a single result, use the second
/// if you have two nodes with identical results (or if 'To' has a superset
/// of the results of 'From'), use the third otherwise.
///
/// These methods all take an optional UpdateListener, which (if not null) is
/// informed about nodes that are deleted and modified due to recursive
/// changes in the dag.
///
/// These functions only replace all existing uses. It's possible that as
/// these replacements are being performed, CSE may cause the From node
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
void ReplaceAllUsesWith(SDValue From, SDValue To);
void ReplaceAllUsesWith(SDNode *From, SDNode *To);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
/// Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
/// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
/// This correctly handles the case where
/// there is an overlap between the From values and the To values.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
unsigned Num);
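// Illustrative sketch (assumes an old load `OldLoad` replaced by a new node
// `NewLoad` with the same result numbering): rewire both the value users and
// the chain users.
//
//   DAG.ReplaceAllUsesOfValueWith(OldLoad.getValue(0), NewLoad.getValue(0));
//   DAG.ReplaceAllUsesOfValueWith(OldLoad.getValue(1), NewLoad.getValue(1));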
/// If an existing load has uses of its chain, create a token factor node with
/// that chain and the new memory node's chain and update users of the old
/// chain to the token factor. This ensures that the new memory node will have
/// the same relative memory dependency position as the old load. Returns the
/// new merged load chain.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
/// If an existing load has uses of its chain, create a token factor node with
/// that chain and the new memory node's chain and update users of the old
/// chain to the token factor. This ensures that the new memory node will have
/// the same relative memory dependency position as the old load. Returns the
/// new merged load chain.
SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
/// Topologically sort the AllNodes list and assign a unique node id to each
/// node in the DAG based on its topological order. Returns the number of
/// nodes.
unsigned AssignTopologicalOrder();
/// Move node N in the AllNodes list to be immediately
/// before the given iterator Position. This may be used to update the
/// topological ordering when the list of nodes is modified.
void RepositionNode(allnodes_iterator Position, SDNode *N) {
AllNodes.insert(Position, AllNodes.remove(N));
}
/// Returns an APFloat semantics tag appropriate for the given type. If VT is
/// a vector type, the element semantics are returned.
static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
switch (VT.getScalarType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP format");
case MVT::f16: return APFloat::IEEEhalf();
case MVT::bf16: return APFloat::BFloat();
case MVT::f32: return APFloat::IEEEsingle();
case MVT::f64: return APFloat::IEEEdouble();
case MVT::f80: return APFloat::x87DoubleExtended();
case MVT::f128: return APFloat::IEEEquad();
case MVT::ppcf128: return APFloat::PPCDoubleDouble();
}
}
/// Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void AddDbgValue(SDDbgValue *DB, bool isParameter);
/// Add a dbg_label SDNode.
void AddDbgLabel(SDDbgLabel *DB);
/// Get the debug values which reference the given SDNode.
ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
return DbgInfo->getSDDbgValues(SD);
}
public:
/// Return true if there are any SDDbgValue nodes associated
/// with this SelectionDAG.
bool hasDebugValues() const { return !DbgInfo->empty(); }
SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
return DbgInfo->ByvalParmDbgBegin();
}
SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
return DbgInfo->ByvalParmDbgEnd();
}
SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
return DbgInfo->DbgLabelBegin();
}
SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
return DbgInfo->DbgLabelEnd();
}
/// To be invoked on an SDNode that is slated to be erased. This
/// function mirrors \c llvm::salvageDebugInfo.
void salvageDebugInfo(SDNode &N);
/// Signal whether instruction-referencing variable locations are desired for
/// this function's debug-info.
void useInstrRefDebugInfo(bool Flag) {
UseInstrRefDebugInfo = Flag;
}
bool getUseInstrRefDebugInfo() const {
return UseInstrRefDebugInfo;
}
void dump() const;
/// In most cases this function returns the ABI alignment for a given type,
/// except for illegal vector types where the alignment exceeds that of the
/// stack. In such cases we attempt to break the vector down to a legal type
/// and return the ABI alignment for that instead.
Align getReducedAlign(EVT VT, bool UseABI);
/// Create a stack temporary based on the size in bytes and the alignment
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
/// Create a stack temporary, suitable for holding the specified value type.
/// If minAlign is specified, the slot size will have at least that alignment.
SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
/// Create a stack temporary suitable for holding either of the specified
/// value types.
SDValue CreateStackTemporary(EVT VT1, EVT VT2);
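// Illustrative sketch (assumes `DAG` and a value type `VT`): create a stack
// slot large enough for VT and recover its frame index.
//
//   SDValue StackSlot = DAG.CreateStackTemporary(VT);
//   int FI = cast<FrameIndexSDNode>(StackSlot)->getIndex();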
SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
const GlobalAddressSDNode *GA,
const SDNode *N2);
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops);
/// Fold floating-point operations with 2 operands when both operands are
/// constants and/or undefined.
SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
SDValue N1, SDValue N2);
/// Constant fold a setcc to true or false.
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
const SDLoc &dl);
/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used. If so, return the
/// simpler operand, otherwise return a null SDValue.
///
/// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
/// simplify nodes with multiple uses more aggressively.)
SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
/// See if the specified operand can be simplified with the knowledge that
/// only the bits specified by DemandedBits are used in the elements specified
/// by DemandedElts. If so, return the simpler operand, otherwise return a
/// null SDValue.
///
/// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
/// simplify nodes with multiple uses more aggressively.)
SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
const APInt &DemandedElts);
/// Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
/// Return true if 'Op & Mask' is known to be zero. We
/// use this predicate to simplify operations downstream. Op and Mask are
/// known to be the same type.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
unsigned Depth = 0) const;
/// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
/// use this predicate to simplify operations downstream. Op and Mask are
/// known to be the same type.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
const APInt &DemandedElts, unsigned Depth = 0) const;
/// Return true if '(Op & Mask) == Mask'.
/// Op and Mask are known to be the same type.
bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
unsigned Depth = 0) const;
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. For vectors, the known bits are those that are shared by
/// every vector element.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
/// Determine which bits of Op are known to be either zero or one and return
/// them in Known. The DemandedElts argument allows us to only collect the
/// known bits that are shared by the requested vector elements.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
unsigned Depth = 0) const;
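// Illustrative sketch (assumes an SDValue `Op`): use known bits to prove that
// the low bit of Op is zero.
//
//   KnownBits Known = DAG.computeKnownBits(Op);
//   bool LowBitIsClear = Known.Zero[0];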
/// Used to represent the possible overflow behavior of an operation.
/// Never: the operation cannot overflow.
/// Always: the operation will always overflow.
/// Sometime: the operation may or may not overflow.
enum OverflowKind {
OFK_Never,
OFK_Sometime,
OFK_Always,
};
/// Determine if the result of the addition of 2 nodes can overflow.
OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
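// Illustrative sketch (assumes add operands `N0`/`N1`): only rewrite the add
// when it provably cannot overflow.
//
//   if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never) {
//     // ... safe to assume no overflow ...
//   }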
/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit
/// is set.
bool isKnownToBeAPowerOfTwo(SDValue Val) const;
/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
/// to each other, so we return 3. Targets can implement the
/// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
/// target nodes to be understood.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
/// to each other, so we return 3. The DemandedElts argument allows
/// us to only collect the minimum sign bits of the requested vector elements.
/// Targets can implement the ComputeNumSignBitsForTarget method in the
/// TargetLowering class to allow target nodes to be understood.
unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
unsigned Depth = 0) const;
/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
/// Helper wrapper to ComputeNumSignBits.
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth = 0) const;
/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
/// Helper wrapper to ComputeNumSignBits.
unsigned ComputeMaxSignificantBits(SDValue Op, const APInt &DemandedElts,
unsigned Depth = 0) const;
/// Return true if this function can prove that \p Op is never poison
/// and, if \p PoisonOnly is false, does not have undef bits.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly = false,
unsigned Depth = 0) const;
/// Return true if this function can prove that \p Op is never poison
/// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
/// argument limits the check to the requested vector elements.
bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, const APInt &DemandedElts,
bool PoisonOnly = false,
unsigned Depth = 0) const;
/// Return true if this function can prove that \p Op is never poison.
bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth = 0) const {
return isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly*/ true, Depth);
}
/// Return true if this function can prove that \p Op is never poison. The
/// DemandedElts argument limits the check to the requested vector elements.
bool isGuaranteedNotToBePoison(SDValue Op, const APInt &DemandedElts,
unsigned Depth = 0) const {
return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts,
/*PoisonOnly*/ true, Depth);
}
/// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
/// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
/// is guaranteed to have the same semantics as an ADD. This handles the
/// equivalence:
/// X|Cst == X+Cst iff X&Cst = 0.
bool isBaseWithConstantOffset(SDValue Op) const;
/// Test whether the given SDValue is known to never be NaN. If \p SNaN is
/// true, returns if \p Op is known to never be a signaling NaN (it may still
/// be a qNaN).
bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
/// \returns true if \p Op is known to never be a signaling NaN.
bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
return isKnownNeverNaN(Op, true, Depth);
}
/// Test whether the given floating point SDValue is known to never be
/// positive or negative zero.
bool isKnownNeverZeroFloat(SDValue Op) const;
/// Test whether the given SDValue is known to contain non-zero value(s).
bool isKnownNeverZero(SDValue Op) const;
/// Test whether two SDValues are known to compare equal. This
/// is true if they are the same value, or if one is negative zero and the
/// other positive zero.
bool isEqualTo(SDValue A, SDValue B) const;
/// Return true if A and B have no common bits set. As an example, this can
/// allow an 'add' to be transformed into an 'or'.
bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
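// Illustrative sketch of the add-to-or transform mentioned above (assumes a
// combine-style context with `DAG`, `DL`, `VT` and operands `N0`/`N1`):
//
//   if (DAG.haveNoCommonBitsSet(N0, N1))
//     return DAG.getNode(ISD::OR, DL, VT, N0, N1);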
/// Test whether \p V has a splatted value for all the demanded elements.
///
/// On success \p UndefElts will indicate the elements that have UNDEF
/// values instead of the splat value, this is only guaranteed to be correct
/// for \p DemandedElts.
///
/// NOTE: The function will return true for a demanded splat of UNDEF values.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
unsigned Depth = 0) const;
/// Test whether \p V has a splatted value.
bool isSplatValue(SDValue V, bool AllowUndefs = false) const;
/// If V is a splatted value, return the source vector and its splat index.
SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
/// If V is a splat vector, return its scalar source operand by extracting
/// that element from the source vector. If LegalTypes is true, this method
/// may only return a legally-typed splat value. If it cannot legalize the
/// splatted value it will return SDValue().
SDValue getSplatValue(SDValue V, bool LegalTypes = false);
/// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
/// that is less than the element bit-width of the shift node, return it.
const APInt *getValidShiftAmountConstant(SDValue V,
const APInt &DemandedElts) const;
/// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
/// than the element bit-width of the shift node, return the minimum value.
const APInt *
getValidMinimumShiftAmountConstant(SDValue V,
const APInt &DemandedElts) const;
/// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
/// than the element bit-width of the shift node, return the maximum value.
const APInt *
getValidMaximumShiftAmountConstant(SDValue V,
const APInt &DemandedElts) const;
/// Match a binop + shuffle pyramid that represents a horizontal reduction
/// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node \p
/// Extract. The reduction must use one of the opcodes listed in \p
/// CandidateBinOps and on success \p BinOp will contain the matching opcode.
/// Returns the vector that is being reduced on, or SDValue() if a reduction
/// was not matched. If \p AllowPartials is set then in the case of a
/// reduction pattern that only matches the first few stages, the extracted
/// subvector of the start of the reduction is returned.
SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
ArrayRef<ISD::NodeType> CandidateBinOps,
bool AllowPartials = false);
/// Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector
/// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
/// If the ResNE is greater than the width of the vector op, unroll the
/// vector op and fill the end of the resulting vector with UNDEFS.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
/// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
/// This is a separate function because those opcodes have two results.
std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
unsigned ResNE = 0);
/// Return true if loads are next to each other and can be
/// merged. Check that both are nonvolatile and if LD is loading
/// 'Bytes' bytes from a location that is 'Dist' units away from the
/// location that the 'Base' load is loading from.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
unsigned Bytes, int Dist) const;
/// Infer alignment of a load / store address. Return None if it cannot be
/// inferred.
MaybeAlign InferPtrAlign(SDValue Ptr) const;
/// Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
/// Compute the VTs needed for the low/hi parts of a type, dependent on an
/// enveloping VT that has been split into two identical pieces. Sets the
/// \p HiIsEmpty flag when the hi type has zero storage size.
std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
bool *HiIsEmpty) const;
/// Split the vector with EXTRACT_SUBVECTOR using the provided
/// VTs and return the low/high part.
std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
const EVT &LoVT, const EVT &HiVT);
/// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
return SplitVector(N, DL, LoVT, HiVT);
}
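// Illustrative sketch (assumes a vector-typed SDValue `Vec` and an SDLoc
// `DL`): split a vector value into its low and high halves.
//
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);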
/// Split the explicit vector length parameter of a VP operation.
std::pair<SDValue, SDValue> SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL);
/// Split the node's operand with EXTRACT_SUBVECTOR and
/// return the low/high part.
std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N,
                                               unsigned OpNo) {
return SplitVector(N->getOperand(OpNo), SDLoc(N));
}
/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue WidenVector(const SDValue &N, const SDLoc &DL);
/// Append the extracted elements from Start to Count out of the vector Op in
/// Args. If Count is 0, all of the elements will be extracted. The extracted
/// elements will have type EVT if it is provided, and otherwise their type
/// will be Op's element type.
void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
unsigned Start = 0, unsigned Count = 0,
EVT EltVT = EVT());
/// Compute the default alignment value for the given type.
Align getEVTAlign(EVT MemoryVT) const;
/// Compute the default alignment value for the given type.
/// FIXME: Remove once transition to Align is over.
inline unsigned getEVTAlignment(EVT MemoryVT) const {
return getEVTAlign(MemoryVT).value();
}
/// Test whether the given value is a constant int or similar node.
SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
/// Test whether the given value is a constant FP or similar node.
SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const;
/// \returns true if \p N is any kind of constant or build_vector of
/// constants, int or float. If a vector, it may not necessarily be a splat.
inline bool isConstantValueOfAnyType(SDValue N) const {
return isConstantIntBuildVectorOrConstantInt(N) ||
isConstantFPBuildVectorOrConstantFP(N);
}
void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
}
CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
auto I = SDCallSiteDbgInfo.find(CallNode);
if (I != SDCallSiteDbgInfo.end())
return std::move(I->second).CSInfo;
return CallSiteInfo();
}
void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
}
/// Return the HeapAllocSite type associated with the SDNode, if it exists.
MDNode *getHeapAllocSite(const SDNode *Node) {
auto It = SDCallSiteDbgInfo.find(Node);
if (It == SDCallSiteDbgInfo.end())
return nullptr;
return It->second.HeapAllocSite;
}
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
if (NoMerge)
SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
}
bool getNoMergeSiteInfo(const SDNode *Node) {
auto I = SDCallSiteDbgInfo.find(Node);
if (I == SDCallSiteDbgInfo.end())
return false;
return I->second.NoMerge;
}
/// Return the current function's default denormal handling kind for the given
/// floating point type.
DenormalMode getDenormalMode(EVT VT) const {
return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
}
bool shouldOptForSize() const;
/// Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
SDNodeFlags Flags);
private:
void InsertNode(SDNode *N);
bool RemoveNodeFromCSEMaps(SDNode *N);
void AddModifiedNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos);
SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
void DeleteNodeNotInCSEMaps(SDNode *N);
void DeallocateNode(SDNode *N);
void allnodes_clear();
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. This
/// overload is for nodes other than Constant or ConstantFP, use the other one
/// for those.
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. Performs
/// additional processing for constant nodes.
SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
void *&InsertPos);
/// List of non-single value types.
FoldingSet<SDVTListNode> VTListMap;
/// Maps to auto-CSE operations.
std::vector<CondCodeSDNode*> CondCodeNodes;
std::vector<SDNode*> ValueTypeNodes;
std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
StringMap<SDNode*> ExternalSymbols;
std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
DenseMap<MCSymbol *, SDNode *> MCSymbols;
FlagInserter *Inserter = nullptr;
};
template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
static nodes_iterator nodes_begin(SelectionDAG *G) {
return nodes_iterator(G->allnodes_begin());
}
static nodes_iterator nodes_end(SelectionDAG *G) {
return nodes_iterator(G->allnodes_end());
}
};
} // end namespace llvm
#endif // LLVM_CODEGEN_SELECTIONDAG_H
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
|