//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
"disable-sched-hazard", cl::Hidden, cl::init(false),
cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const {
if (OpNum >= MCID.getNumOperands())
return nullptr;
short RegClass = MCID.OpInfo[OpNum].RegClass;
if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
return TRI->getPointerRegClass(MF, RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
if (RegClass < 0)
return nullptr;
// Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
llvm_unreachable("Target didn't implement insertNoop!");
}
/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned Quantity) const {
for (unsigned i = 0; i < Quantity; ++i)
insertNoop(MBB, MI);
}
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
return strncmp(Str, MAI.getCommentString().data(),
MAI.getCommentString().size()) == 0;
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
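///
/// For example, assuming a target whose comment string is "#" and whose
/// maximum instruction length is 4 bytes, the string
///   "nop\n.space 8\n# done"
/// is measured as 4 + 8 = 12 bytes: one ordinary instruction, one .space
/// directive counted at its literal size, and a comment that adds nothing.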
unsigned TargetInstrInfo::getInlineAsmLength(
const char *Str,
const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
// Count the number of instructions in the asm.
bool AtInsnStart = true;
unsigned Length = 0;
const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
for (; *Str; ++Str) {
if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
strlen(MAI.getSeparatorString())) == 0) {
AtInsnStart = true;
} else if (isAsmComment(Str, MAI)) {
// Stop counting as an instruction after a comment until the next
// separator.
AtInsnStart = false;
}
if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
unsigned AddLength = MaxInstLength;
if (strncmp(Str, ".space", 6) == 0) {
char *EStr;
int SpaceSize;
SpaceSize = strtol(Str + 6, &EStr, 10);
SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
++EStr;
if (*EStr == '\0' || *EStr == '\n' ||
isAsmComment(EStr, MAI)) // Successfully parsed .space argument
AddLength = SpaceSize;
}
Length += AddLength;
AtInsnStart = false;
}
}
return Length;
}
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const {
MachineBasicBlock *MBB = Tail->getParent();
// Remove all the old successors of MBB from the CFG.
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_begin());
// Save off the debug loc before erasing the instruction.
DebugLoc DL = Tail->getDebugLoc();
// Update call site info and remove all the dead instructions
// from the end of MBB.
while (Tail != MBB->end()) {
auto MI = Tail++;
if (MI->shouldUpdateCallSiteInfo())
MBB->getParent()->eraseCallSiteInfo(&*MI);
MBB->erase(MI);
}
// If NewDest isn't the block immediately after MBB, insert a branch to it.
if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
MBB->addSuccessor(NewDest);
}
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI, unsigned Idx1,
unsigned Idx2) const {
const MCInstrDesc &MCID = MI.getDesc();
bool HasDef = MCID.getNumDefs();
if (HasDef && !MI.getOperand(0).isReg())
// No idea how to commute this instruction. Target should implement its own.
return nullptr;
unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
"TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
Register Reg1 = MI.getOperand(Idx1).getReg();
Register Reg2 = MI.getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI.getOperand(Idx1).isKill();
bool Reg2IsKill = MI.getOperand(Idx2).isKill();
bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
// Avoid calling isRenamable for virtual registers since we assert that
// renamable property is only queried/set for physical registers.
bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
? MI.getOperand(Idx1).isRenamable()
: false;
bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
? MI.getOperand(Idx2).isRenamable()
: false;
// If destination is tied to either of the commuted source register, then
// it must be updated.
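// Example (a hypothetical two-address instruction): in "%2 = ADD %2, %3"
// where operand 1 is tied to the def, commuting the sources yields
// "%3 = ADD %3, %2"; the def is rewritten so it keeps matching the register
// that now occupies the tied source slot.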
if (HasDef && Reg0 == Reg1 &&
MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
Reg2IsKill = false;
Reg0 = Reg2;
SubReg0 = SubReg2;
} else if (HasDef && Reg0 == Reg2 &&
MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
Reg1IsKill = false;
Reg0 = Reg1;
SubReg0 = SubReg1;
}
MachineInstr *CommutedMI = nullptr;
if (NewMI) {
// Create a new instruction.
MachineFunction &MF = *MI.getMF();
CommutedMI = MF.CloneMachineInstr(&MI);
} else {
CommutedMI = &MI;
}
if (HasDef) {
CommutedMI->getOperand(0).setReg(Reg0);
CommutedMI->getOperand(0).setSubReg(SubReg0);
}
CommutedMI->getOperand(Idx2).setReg(Reg1);
CommutedMI->getOperand(Idx1).setReg(Reg2);
CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
// Avoid calling setIsRenamable for virtual registers since we assert that
// renamable property is only queried/set for physical registers.
if (Register::isPhysicalRegister(Reg1))
CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
if (Register::isPhysicalRegister(Reg2))
CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
return CommutedMI;
}
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
// If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
// any commutable operand, which is done in findCommutedOpIndices() method
// called below.
if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
!findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
assert(MI.isCommutable() &&
"Precondition violation: MI must be commutable.");
return nullptr;
}
return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
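// Resolve CommuteAnyOperandIndex placeholders in (ResultIdx1, ResultIdx2)
// against the pair of operands that are actually commutable, and verify that
// any fixed indices supplied by the caller match that pair (in either order).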
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
unsigned &ResultIdx2,
unsigned CommutableOpIdx1,
unsigned CommutableOpIdx2) {
if (ResultIdx1 == CommuteAnyOperandIndex &&
ResultIdx2 == CommuteAnyOperandIndex) {
ResultIdx1 = CommutableOpIdx1;
ResultIdx2 = CommutableOpIdx2;
} else if (ResultIdx1 == CommuteAnyOperandIndex) {
if (ResultIdx2 == CommutableOpIdx1)
ResultIdx1 = CommutableOpIdx2;
else if (ResultIdx2 == CommutableOpIdx2)
ResultIdx1 = CommutableOpIdx1;
else
return false;
} else if (ResultIdx2 == CommuteAnyOperandIndex) {
if (ResultIdx1 == CommutableOpIdx1)
ResultIdx2 = CommutableOpIdx2;
else if (ResultIdx1 == CommutableOpIdx2)
ResultIdx2 = CommutableOpIdx1;
else
return false;
} else
// Check that the result operand indices match the given commutable
// operand indices.
return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
(ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
return true;
}
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
assert(!MI.isBundle() &&
"TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
const MCInstrDesc &MCID = MI.getDesc();
if (!MCID.isCommutable())
return false;
// This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
// is not true, then the target must implement this.
unsigned CommutableOpIdx1 = MCID.getNumDefs();
unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
// No idea.
return false;
return true;
}
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
if (!MI.isTerminator()) return false;
// Conditional branch is a special case.
if (MI.isBranch() && !MI.isBarrier())
return true;
if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(
MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
bool MadeChange = false;
assert(!MI.isBundle() &&
"TargetInstrInfo::PredicateInstruction() can't handle bundles");
const MCInstrDesc &MCID = MI.getDesc();
if (!MI.isPredicable())
return false;
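// Walk all operands; each predicate operand of MI is overwritten with the
// corresponding entry of Pred, with j tracking the position within Pred.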
for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
if (MCID.OpInfo[i].isPredicate()) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
MO.setReg(Pred[j].getReg());
MadeChange = true;
} else if (MO.isImm()) {
MO.setImm(Pred[j].getImm());
MadeChange = true;
} else if (MO.isMBB()) {
MO.setMBB(Pred[j].getMBB());
MadeChange = true;
}
++j;
}
}
return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(
const MachineInstr &MI,
SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
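// Generic implementation: rely on the attached memory operands and treat any
// load whose pseudo source value is a fixed stack slot as a stack-slot access.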
size_t StartSize = Accesses.size();
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
oe = MI.memoperands_end();
o != oe; ++o) {
if ((*o)->isLoad() &&
dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
Accesses.push_back(*o);
}
return Accesses.size() != StartSize;
}
bool TargetInstrInfo::hasStoreToStackSlot(
const MachineInstr &MI,
SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
size_t StartSize = Accesses.size();
for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
oe = MI.memoperands_end();
o != oe; ++o) {
if ((*o)->isStore() &&
dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
Accesses.push_back(*o);
}
return Accesses.size() != StartSize;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
const MachineFunction &MF) const {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!SubIdx) {
Size = TRI->getSpillSize(*RC);
Offset = 0;
return true;
}
unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size.
if (BitSize % 8)
return false;
int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
if (BitOffset < 0 || BitOffset % 8)
return false;
Size = BitSize / 8;
Offset = (unsigned)BitOffset / 8;
assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
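// On a big-endian target the subregister occupies the opposite end of the
// spill slot, so mirror the byte offset within the slot.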
if (!MF.getDataLayout().isLittleEndian()) {
Offset = TRI->getSpillSize(*RC) - (Offset + Size);
}
return true;
}
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
}
bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const {
return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
MachineFunction &MF = *MBB.getParent();
return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
unsigned FoldIdx) {
assert(MI.isCopy() && "MI must be a COPY instruction");
if (MI.getNumOperands() != 2)
return nullptr;
assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
if (FoldOp.getSubReg() || LiveOp.getSubReg())
return nullptr;
Register FoldReg = FoldOp.getReg();
Register LiveReg = LiveOp.getReg();
assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");
const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
if (Register::isPhysicalRegister(LiveOp.getReg()))
return RC->contains(LiveOp.getReg()) ? RC : nullptr;
if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
return RC;
// FIXME: Allow folding when register classes are memory compatible.
return nullptr;
}
void TargetInstrInfo::getNoop(MCInst &NopInst) const {
llvm_unreachable("Not implemented");
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops, int FrameIndex,
const TargetInstrInfo &TII) {
unsigned StartIdx = 0;
unsigned NumDefs = 0;
switch (MI.getOpcode()) {
case TargetOpcode::STACKMAP: {
// StackMapLiveValues are foldable
StartIdx = StackMapOpers(&MI).getVarIdx();
break;
}
case TargetOpcode::PATCHPOINT: {
// For PatchPoint, the call args are not foldable (even if reported in the
// stackmap e.g. via anyregcc).
StartIdx = PatchPointOpers(&MI).getVarIdx();
break;
}
case TargetOpcode::STATEPOINT: {
// For statepoints, fold deopt and gc arguments, but not call arguments.
StartIdx = StatepointOpers(&MI).getVarIdx();
NumDefs = MI.getNumDefs();
break;
}
default:
llvm_unreachable("unexpected stackmap opcode");
}
unsigned DefToFoldIdx = MI.getNumOperands();
// Bail out (return nullptr) if any operands requested for folding are not
// foldable (not part of the stackmap's live values).
for (unsigned Op : Ops) {
if (Op < NumDefs) {
assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
DefToFoldIdx = Op;
} else if (Op < StartIdx) {
return nullptr;
}
if (MI.getOperand(Op).isTied())
return nullptr;
}
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
// No need to fold the return value, the metadata, or the function arguments.
for (unsigned i = 0; i < StartIdx; ++i)
if (i != DefToFoldIdx)
MIB.add(MI.getOperand(i));
for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
MachineOperand &MO = MI.getOperand(i);
unsigned TiedTo = e;
(void)MI.isRegTiedToDefOperand(i, &TiedTo);
if (is_contained(Ops, i)) {
assert(TiedTo == e && "Cannot fold tied operands");
unsigned SpillSize;
unsigned SpillOffset;
// Compute the spill slot size and offset.
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(MO.getReg());
bool Valid =
TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
if (!Valid)
report_fatal_error("cannot spill patchpoint subregister operand");
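// Encode the folded operand as an indirect memory reference:
// <IndirectMemRefOp, byte size, frame index, byte offset within the slot>.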
MIB.addImm(StackMaps::IndirectMemRefOp);
MIB.addImm(SpillSize);
MIB.addFrameIndex(FrameIndex);
MIB.addImm(SpillOffset);
} else {
MIB.add(MO);
if (TiedTo < e) {
assert(TiedTo < NumDefs && "Bad tied operand");
if (TiedTo > DefToFoldIdx)
--TiedTo;
NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
}
}
}
return NewMI;
}
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops, int FI,
LiveIntervals *LIS,
VirtRegMap *VRM) const {
auto Flags = MachineMemOperand::MONone;
for (unsigned OpIdx : Ops)
Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
: MachineMemOperand::MOLoad;
MachineBasicBlock *MBB = MI.getParent();
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
// If we're not folding a load into a subreg, the size of the load is the
// size of the spill slot. But if we are, we need to figure out what the
// actual load size is.
int64_t MemSize = 0;
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (Flags & MachineMemOperand::MOStore) {
MemSize = MFI.getObjectSize(FI);
} else {
for (unsigned OpIdx : Ops) {
int64_t OpSize = MFI.getObjectSize(FI);
if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
if (SubRegSize > 0 && !(SubRegSize % 8))
OpSize = SubRegSize / 8;
}
MemSize = std::max(MemSize, OpSize);
}
}
assert(MemSize && "Did not expect a zero-sized stack slot");
MachineInstr *NewMI = nullptr;
if (MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT ||
MI.getOpcode() == TargetOpcode::STATEPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
MBB->insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
}
if (NewMI) {
NewMI->setMemRefs(MF, MI.memoperands());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
"Folded a def to a non-store!");
assert((!(Flags & MachineMemOperand::MOLoad) ||
NewMI->mayLoad()) &&
"Folded a use to a non-load!");
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
Flags, MemSize, MFI.getObjectAlign(FI));
NewMI->addMemOperand(MF, MMO);
// The pass "x86 speculative load hardening" always attaches symbols to
// call instructions. We need copy it form old instruction.
NewMI->cloneInstrSymbols(MF, MI);
return NewMI;
}
// Straight COPY may fold as load/store.
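// For instance, folding the def operand of "%dst = COPY %src" turns the copy
// into a store of %src to the stack slot, while folding the use operand turns
// it into a reload of the slot into %dst.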
if (!MI.isCopy() || Ops.size() != 1)
return nullptr;
const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
if (!RC)
return nullptr;
const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
MachineBasicBlock::iterator Pos = MI;
if (Flags == MachineMemOperand::MOStore)
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
else
loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
return &*--Pos;
}
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineInstr &LoadMI,
LiveIntervals *LIS) const {
assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
for (unsigned OpIdx : Ops)
assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = nullptr;
int FrameIndex = 0;
if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
MI.getOpcode() == TargetOpcode::PATCHPOINT ||
MI.getOpcode() == TargetOpcode::STATEPOINT) &&
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
if (NewMI)
NewMI = &*MBB.insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
}
if (!NewMI)
return nullptr;
// Copy the memoperands from the load to the folded instruction.
if (MI.memoperands_empty()) {
NewMI->setMemRefs(MF, LoadMI.memoperands());
} else {
// Handle the rare case of folding multiple loads.
NewMI->setMemRefs(MF, MI.memoperands());
for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
E = LoadMI.memoperands_end();
I != E; ++I) {
NewMI->addMemOperand(MF, *I);
}
}
return NewMI;
}
bool TargetInstrInfo::hasReassociableOperands(
const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
const MachineOperand &Op1 = Inst.getOperand(1);
const MachineOperand &Op2 = Inst.getOperand(2);
const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
// We need virtual register definitions for the operands that we will
// reassociate.
MachineInstr *MI1 = nullptr;
MachineInstr *MI2 = nullptr;
if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
MI1 = MRI.getUniqueVRegDef(Op1.getReg());
if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
MI2 = MRI.getUniqueVRegDef(Op2.getReg());
// And they need to be in the trace (otherwise, they won't have a depth).
return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
bool &Commuted) const {
const MachineBasicBlock *MBB = Inst.getParent();
const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
unsigned AssocOpcode = Inst.getOpcode();
// If only one operand has the same opcode and it's the second source operand,
// the operands must be commuted.
Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
if (Commuted)
std::swap(MI1, MI2);
// 1. The previous instruction must be the same type as Inst.
// 2. The previous instruction must also be associative/commutative (this can
// be different even for instructions with the same opcode if traits like
// fast-math-flags are included).
// 3. The previous instruction must have virtual register definitions for its
// operands in the same basic block as Inst.
// 4. The previous instruction's result must only be used by Inst.
return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
hasReassociableOperands(*MI1, MBB) &&
MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}
// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
// operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
bool &Commuted) const {
return isAssociativeAndCommutative(Inst) &&
hasReassociableOperands(Inst, Inst.getParent()) &&
hasReassociableSibling(Inst, Commuted);
}
// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
// instruction is known to not increase the critical path, then don't match
// that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
bool DoRegPressureReduce) const {
bool Commute;
if (isReassociationCandidate(Root, Commute)) {
// We found a sequence of instructions that may be suitable for a
// reassociation of operands to increase ILP. Specify each commutation
// possibility for the Prev instruction in the sequence and let the
// machine combiner decide if changing the operands is worthwhile.
if (Commute) {
Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
} else {
Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
}
return true;
}
return false;
}
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
return false;
}
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
MachineInstr &Root, MachineInstr &Prev,
MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
MachineFunction *MF = Root.getMF();
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
// This array encodes the operand index for each parameter because the
// operands may be commuted. Each row corresponds to a pattern value,
// and each column specifies the index of A, B, X, Y.
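// Rows, in order: REASSOC_AX_BY, REASSOC_AX_YB, REASSOC_XA_BY, REASSOC_XA_YB
// (matching the switch below).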
unsigned OpIdx[4][4] = {
{ 1, 1, 2, 2 },
{ 1, 2, 2, 1 },
{ 2, 1, 1, 2 },
{ 2, 2, 1, 1 }
};
int Row;
switch (Pattern) {
case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
default: llvm_unreachable("unexpected MachineCombinerPattern");
}
MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
MachineOperand &OpC = Root.getOperand(0);
Register RegA = OpA.getReg();
Register RegB = OpB.getReg();
Register RegX = OpX.getReg();
Register RegY = OpY.getReg();
Register RegC = OpC.getReg();
if (Register::isVirtualRegister(RegA))
MRI.constrainRegClass(RegA, RC);
if (Register::isVirtualRegister(RegB))
MRI.constrainRegClass(RegB, RC);
if (Register::isVirtualRegister(RegX))
MRI.constrainRegClass(RegX, RC);
if (Register::isVirtualRegister(RegY))
MRI.constrainRegClass(RegY, RC);
if (Register::isVirtualRegister(RegC))
MRI.constrainRegClass(RegC, RC);
// Create a new virtual register for the result of (X op Y) instead of
// recycling RegB because the MachineCombiner's computation of the critical
// path requires a new register definition rather than an existing one.
Register NewVR = MRI.createVirtualRegister(RC);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
unsigned Opcode = Root.getOpcode();
bool KillA = OpA.isKill();
bool KillX = OpX.isKill();
bool KillY = OpY.isKill();
// Create new instructions for insertion.
MachineInstrBuilder MIB1 =
BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
.addReg(RegX, getKillRegState(KillX))
.addReg(RegY, getKillRegState(KillY));
MachineInstrBuilder MIB2 =
BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
.addReg(RegA, getKillRegState(KillA))
.addReg(NewVR, getKillRegState(true));
setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);
// Record new instructions for insertion and old instructions for deletion.
InsInstrs.push_back(MIB1);
InsInstrs.push_back(MIB2);
DelInstrs.push_back(&Prev);
DelInstrs.push_back(&Root);
}
void TargetInstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
// Select the previous instruction in the sequence based on the input pattern.
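// For the *_BY patterns Prev defines Root's first source operand; for the
// *_YB patterns it defines the second one.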
MachineInstr *Prev = nullptr;
switch (Pattern) {
case MachineCombinerPattern::REASSOC_AX_BY:
case MachineCombinerPattern::REASSOC_XA_BY:
Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
break;
case MachineCombinerPattern::REASSOC_AX_YB:
case MachineCombinerPattern::REASSOC_XA_YB:
Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
break;
default:
break;
}
assert(Prev && "Unknown pattern for machine combiner");
reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
const MachineInstr &MI, AAResults *AA) const {
const MachineFunction &MF = *MI.getMF();
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Remat clients assume operand 0 is the defined register.
if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
return false;
Register DefReg = MI.getOperand(0).getReg();
// A sub-register definition can only be rematerialized if the instruction
// doesn't read the other parts of the register. Otherwise it is really a
// read-modify-write operation on the full virtual register which cannot be
// moved safely.
if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
MI.readsVirtualRegister(DefReg))
return false;
// A load from a fixed stack slot can be rematerialized. This may be
// redundant with subsequent checks, but it's target-independent,
// simple, and a common case.
int FrameIdx = 0;
if (isLoadFromStackSlot(MI, FrameIdx) &&
MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
return true;
// Avoid instructions obviously unsafe for remat.
if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
MI.hasUnmodeledSideEffects())
return false;
// Don't remat inline asm. We have no idea how expensive it is
// even if it's side effect free.
if (MI.isInlineAsm())
return false;
// Avoid instructions which load from potentially varying memory.
if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume
// the instruction is not rematerializable.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
Register Reg = MO.getReg();
if (Reg == 0)
continue;
// Check for a well-behaved physical register.
if (Register::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
if (!MRI.isConstantPhysReg(Reg))
return false;
} else {
// A physreg def. We can't remat it.
return false;
}
continue;
}
// Only allow one virtual-register def. There may be multiple defs of the
// same virtual register, though.
if (MO.isDef() && Reg != DefReg)
return false;
// Don't allow any virtual-register uses. Rematting an instruction with
// virtual register uses would lengthen the live ranges of the uses, which
// is not necessarily a good idea, certainly not "trivial".
if (MO.isUse())
return false;
}
// Everything checked out.
return true;
}
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getMF();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
if (!isFrameInstr(MI))
return 0;
int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));
if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
(StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
SPAdj = -SPAdj;
return SPAdj;
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
// Terminators and labels can't be scheduled around.
if (MI.isTerminator() || MI.isPosition())
return true;
// INLINEASM_BR can jump to another block
if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
return true;
// Don't attempt to schedule around any instruction that defines
// a stack-oriented pointer, as it's unlikely to be profitable. This
// saves compile time, because it doesn't require every single
// stack slot reference to depend on the instruction that does the
// modification.
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
return !DisableHazardRecognizer;
}
// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
const ScheduleDAG *DAG) const {
// Dummy hazard recognizer allows all instructions to issue.
return new ScheduleHazardRecognizer();
}
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const {
return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
SmallVector<const MachineOperand *, 4> BaseOps;
unsigned Width;
if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
Width, TRI) ||
BaseOps.size() != 1)
return false;
BaseOp = BaseOps.front();
return true;
}
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const {
if (!ItinData || ItinData->isEmpty())
return -1;
if (!DefNode->isMachineOpcode())
return -1;
unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
if (!UseNode->isMachineOpcode())
return ItinData->getOperandCycle(DefClass, DefIdx);
unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
SDNode *N) const {
if (!ItinData || ItinData->isEmpty())
return 1;
if (!N->isMachineOpcode())
return 1;
return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr &MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
unsigned Class = MI.getDesc().getSchedClass();
int UOps = ItinData->Itineraries[Class].NumMicroOps;
if (UOps >= 0)
return UOps;
// The # of u-ops is dynamically determined. The specific target should
// override this function to return the right number.
return 1;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
const MachineInstr &DefMI) const {
if (DefMI.isTransient())
return 0;
if (DefMI.mayLoad())
return SchedModel.LoadLatency;
if (isHighLatencyDef(DefMI.getOpcode()))
return SchedModel.HighLatency;
return 1;
}
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
return 0;
}
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr &MI,
unsigned *PredCost) const {
// Default to one cycle for no itinerary. However, an "empty" itinerary may
// still have a MinLatency property, which getStageLatency checks.
if (!ItinData)
return MI.mayLoad() ? 2 : 1;
return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr &DefMI,
unsigned DefIdx) const {
const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
unsigned DefClass = DefMI.getDesc().getSchedClass();
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
return (DefCycle != -1 && DefCycle <= 1);
}
Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const {
const MachineFunction *MF = MI.getMF();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
int64_t Offset;
bool OffsetIsScalable;
// To simplify the sub-register handling, verify that we only need to
// consider physical registers.
assert(MF->getProperties().hasProperty(
MachineFunctionProperties::Property::NoVRegs));
if (auto DestSrc = isCopyInstr(MI)) {
Register DestReg = DestSrc->Destination->getReg();
// If the copy destination is the forwarding reg, describe the forwarding
// reg using the copy source as the backup location. Example:
//
// x0 = MOV x7
// call callee(x0) ; x0 described as x7
if (Reg == DestReg)
return ParamLoadedValue(*DestSrc->Source, Expr);
// Cases where super- or sub-registers need to be described should
// be handled by the target's hook implementation.
assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
"TargetInstrInfo::describeLoadedValue can't describe super- or "
"sub-regs for copy instructions");
return None;
} else if (auto RegImm = isAddImmediate(MI, Reg)) {
Register SrcReg = RegImm->Reg;
Offset = RegImm->Imm;
Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
} else if (MI.hasOneMemOperand()) {
// Only describe memory which provably does not escape the function. As
// described in llvm.org/PR43343, escaped memory may be clobbered by the
// callee (or by another thread).
const auto &TII = MF->getSubtarget().getInstrInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
const MachineMemOperand *MMO = MI.memoperands()[0];
const PseudoSourceValue *PSV = MMO->getPseudoValue();
// If the address points to "special" memory (e.g. a spill slot), it's
// sufficient to check that it isn't aliased by any high-level IR value.
if (!PSV || PSV->mayAlias(&MFI))
return None;
const MachineOperand *BaseOp;
if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
TRI))
return None;
// FIXME: Scalable offsets are not yet handled in the offset code below.
if (OffsetIsScalable)
return None;
// TODO: Can currently only handle mem instructions with a single define.
// An example from the x86 target:
// ...
// DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
// ...
//
if (MI.getNumExplicitDefs() != 1)
return None;
// TODO: In what way do we need to take Reg into consideration here?
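// Describe the parameter as the memory at BaseOp + Offset: append the offset
// and a DW_OP_deref_size of MMO->getSize() bytes to the expression.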
SmallVector<uint64_t, 8> Ops;
DIExpression::appendOffset(Ops, Offset);
Ops.push_back(dwarf::DW_OP_deref_size);
Ops.push_back(MMO->getSize());
Expr = DIExpression::prependOpcodes(Expr, Ops);
return ParamLoadedValue(*BaseOp, Expr);
}
return None;
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
unsigned DefClass = DefMI.getDesc().getSchedClass();
unsigned UseClass = UseMI.getDesc().getSchedClass();
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
// Let the target hook getInstrLatency handle missing itineraries.
if (!ItinData)
return getInstrLatency(ItinData, DefMI);
if (ItinData->isEmpty())
return defaultDefLatency(ItinData->SchedModel, DefMI);
// Otherwise an operand-level itinerary lookup is required; return -1.
return -1;
}
bool TargetInstrInfo::getRegSequenceInputs(
const MachineInstr &MI, unsigned DefIdx,
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
assert((MI.isRegSequence() ||
MI.isRegSequenceLike()) && "Instruction does not have the proper type");
if (!MI.isRegSequence())
return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);
// We are looking at:
// Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
OpIdx += 2) {
const MachineOperand &MOReg = MI.getOperand(OpIdx);
if (MOReg.isUndef())
continue;
const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
assert(MOSubIdx.isImm() &&
"One of the subindex of the reg_sequence is not an immediate");
// Record Reg:SubReg, SubIdx.
InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
(unsigned)MOSubIdx.getImm()));
}
return true;
}
bool TargetInstrInfo::getExtractSubregInputs(
const MachineInstr &MI, unsigned DefIdx,
RegSubRegPairAndIdx &InputReg) const {
assert((MI.isExtractSubreg() ||
MI.isExtractSubregLike()) && "Instruction does not have the proper type");
if (!MI.isExtractSubreg())
return getExtractSubregLikeInputs(MI, DefIdx, InputReg);
// We are looking at:
// Def = EXTRACT_SUBREG v0.sub1, sub0.
assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
const MachineOperand &MOReg = MI.getOperand(1);
if (MOReg.isUndef())
return false;
const MachineOperand &MOSubIdx = MI.getOperand(2);
assert(MOSubIdx.isImm() &&
"The subindex of the extract_subreg is not an immediate");
InputReg.Reg = MOReg.getReg();
InputReg.SubReg = MOReg.getSubReg();
InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
const MachineInstr &MI, unsigned DefIdx,
RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
assert((MI.isInsertSubreg() ||
MI.isInsertSubregLike()) && "Instruction does not have the proper type");
if (!MI.isInsertSubreg())
return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);
// We are looking at:
// Def = INSERT_SUBREG v0, v1, sub0.
assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
const MachineOperand &MOBaseReg = MI.getOperand(1);
const MachineOperand &MOInsertedReg = MI.getOperand(2);
if (MOInsertedReg.isUndef())
return false;
const MachineOperand &MOSubIdx = MI.getOperand(3);
assert(MOSubIdx.isImm() &&
"One of the subindex of the reg_sequence is not an immediate");
BaseReg.Reg = MOBaseReg.getReg();
BaseReg.SubReg = MOBaseReg.getSubReg();
InsertedReg.Reg = MOInsertedReg.getReg();
InsertedReg.SubReg = MOInsertedReg.getSubReg();
InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
return true;
}
// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
const TargetRegisterInfo *TRI) const {
if (!MI.isInlineAsm())
return "";
std::string Flags;
raw_string_ostream OS(Flags);
if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
// Print HasSideEffects, MayLoad, MayStore, IsAlignStack
unsigned ExtraInfo = Op.getImm();
bool First = true;
for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
if (!First)
OS << " ";
First = false;
OS << Info;
}
return OS.str();
}
int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
return "";
assert(Op.isImm() && "Expected flag operand to be an immediate");
// Pretty print the inline asm operand descriptor.
unsigned Flag = Op.getImm();
unsigned Kind = InlineAsm::getKind(Flag);
OS << InlineAsm::getKindName(Kind);
unsigned RCID = 0;
if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
InlineAsm::hasRegClassConstraint(Flag, RCID)) {
if (TRI) {
OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
} else
OS << ":RC" << RCID;
}
if (InlineAsm::isMemKind(Flag)) {
unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
OS << ":" << InlineAsm::getMemConstraintName(MCID);
}
unsigned TiedTo = 0;
if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
OS << " tiedto:$" << TiedTo;
return OS.str();
}
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}