//===- AArch64ExpandImm.cpp - AArch64 Immediate Expansion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the expansion of MOVi32imm/MOVi64imm pseudo
// instructions into sequences of MOVZ/MOVN/MOVK/ORR-immediate instructions.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"

using namespace llvm;
using namespace llvm::AArch64_IMM;

/// Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
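///
/// For example, getChunk(0x1122334455667788, 2) yields 0x3344.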
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");

  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}

/// Check whether the given 16-bit chunk replicated to full 64-bit width
/// can be materialized with an ORR instruction.
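///
/// For example, 0x00ff replicates to 0x00ff00ff00ff00ff, which is a valid
/// logical immediate (eight contiguous ones per 16-bit element), while
/// 0x1234 replicates to 0x1234123412341234, which is not encodable.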
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;

  return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
}

/// Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
/// an ORR instruction.
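///
/// For example (hand-worked): 0x00ff00ab00ff00ff contains three 0x00ff
/// chunks, so it can be materialized as
///   ORR  Xd, XZR, #0x00ff00ff00ff00ff
///   MOVK Xd, #0x00ab, LSL #32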
static bool tryToreplicateChunks(uint64_t UImm,
                                 SmallVectorImpl<ImmInsnModel> &Insn) {
  using CountMap = DenseMap<uint64_t, unsigned>;

  CountMap Counts;

  // Scan the constant and count how often every chunk occurs.
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[getChunk(UImm, Idx)];

  // Traverse the chunks to find one which occurs more than once.
  for (const auto &Chunk : Counts) {
    const uint64_t ChunkVal = Chunk.first;
    const unsigned Count = Chunk.second;

    uint64_t Encoding = 0;

    // We are looking for chunks which have two or three instances and can be
    // materialized with an ORR instruction.
    if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
      continue;

    const bool CountThree = Count == 3;

    Insn.push_back({ AArch64::ORRXri, 0, Encoding });

    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    // Find the first chunk not materialized with the ORR instruction.
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }

    // Create the first MOVK instruction.
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });

    // In case we have three instances the whole constant is now materialized
    // and we can exit.
    if (CountThree)
      return true;

    // Find the remaining chunk which needs to be materialized.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });
    return true;
  }

  return false;
}

/// Check whether this chunk matches the pattern '1...0...'. This pattern
/// starts a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
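///
/// For example, the sign-extended chunk 0xfffffffffffff000 is a start chunk.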
static bool isStartChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(~Chunk);
}

/// Check whether this chunk matches the pattern '0...1...' This pattern
/// ends a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
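///
/// For example, the sign-extended chunk 0x0000000000000003 is an end chunk,
/// while 0xffffffffffffabcd is neither a start nor an end chunk.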
static bool isEndChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(Chunk);
}

/// Clear or set all bits in the chunk at the given index.
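///
/// For example, updateImm(0x00ff00ab00ff00ff, 1, /*Clear=*/true) yields
/// 0x00ff00ab000000ff.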
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFF;

  if (Clear)
    // Clear chunk in the immediate.
    Imm &= ~(Mask << (Idx * 16));
  else
    // Set all bits in the immediate for the particular chunk.
    Imm |= Mask << (Idx * 16);

  return Imm;
}

/// Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction.
/// Materialize the chunks which are either interrupting the sequence or outside
/// of the sequence with a MOVK instruction.
///
/// Assuming S is a chunk which starts the sequence (1...0...), E is a chunk
/// which ends the sequence (0...1...). Then we are looking for constants which
/// contain at least one S and E chunk.
/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
///
/// We are also looking for constants like |S|A|B|E| where the contiguous
/// sequence of ones wraps around the MSB into the LSB.
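///
/// For example (hand-worked): in 0x0003ffffabcdf000, the chunk 0xf000 starts
/// and 0x0003 ends a run of ones that 0xabcd interrupts, so it expands to
///   ORR  Xd, XZR, #0x0003fffffffff000
///   MOVK Xd, #0xabcd, LSL #16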
static bool trySequenceOfOnes(uint64_t UImm,
                              SmallVectorImpl<ImmInsnModel> &Insn) {
  const int NotSet = -1;
  const uint64_t Mask = 0xFFFF;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Try to find the chunks which start/end a contiguous sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    int64_t Chunk = getChunk(UImm, Idx);
    // Sign extend the 16-bit chunk to 64-bit.
    Chunk = (Chunk << 48) >> 48;

    if (isStartChunk(Chunk))
      StartIdx = Idx;
    else if (isEndChunk(Chunk))
      EndIdx = Idx;
  }

  // Early exit in case we can't find a start/end chunk.
  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;

  // Outside of the contiguous sequence of ones everything needs to be zero.
  uint64_t Outside = 0;
  // Chunks between the start and end chunk need to have all their bits set.
  uint64_t Inside = Mask;

  // If our contiguous sequence of ones wraps around from the MSB into the LSB,
  // just swap indices and pretend we are materializing a contiguous sequence
  // of zeros surrounded by a contiguous sequence of ones.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  uint64_t OrrImm = UImm;
  int FirstMovkIdx = NotSet;
  int SecondMovkIdx = NotSet;

  // Find out which chunks we need to patch up to obtain a contiguous sequence
  // of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    const uint64_t Chunk = getChunk(UImm, Idx);

    // Check whether we are looking at a chunk which is not part of the
    // contiguous sequence of ones.
    if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
      OrrImm = updateImm(OrrImm, Idx, Outside == 0);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;

      // Check whether we are looking at a chunk which is part of the
      // contiguous sequence of ones.
    } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
      OrrImm = updateImm(OrrImm, Idx, Inside != Mask);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;
    }
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  // Create the ORR-immediate instruction.
  uint64_t Encoding = 0;
  AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
  Insn.push_back({ AArch64::ORRXri, 0, Encoding });

  const bool SingleMovk = SecondMovkIdx == NotSet;
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, FirstMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             FirstMovkIdx * 16) });

  // Early exit in case we only need to emit a single MOVK instruction.
  if (SingleMovk)
    return true;

  // Create the second MOVK instruction.
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, SecondMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             SecondMovkIdx * 16) });

  return true;
}

/// Expand a MOVi32imm or MOVi64imm pseudo instruction to a
/// MOVZ or MOVN of width BitSize followed by up to 3 MOVK instructions.
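///
/// For example (hand-worked), 0x0000123400005678 expands to
///   MOVZ Xd, #0x5678
///   MOVK Xd, #0x1234, LSL #32
/// while the mostly-ones 0xffffffffffff1234 expands to the single instruction
///   MOVN Xd, #0xedcb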
static inline void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
                                      unsigned OneChunks, unsigned ZeroChunks,
                                      SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
  // more MOVK instructions to insert additional 16-bit portions into the
  // lower bits.
  bool isNeg = false;

  // Use MOVN to materialize the high bits if we have more all-ones chunks
  // than all-zeros chunks.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for last MOVK
  if (Imm != 0) {
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    Shift = (TZ / 16) * 16;
    LastShift = ((63 - LZ) / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;

  Insn.push_back({ FirstOpc, Imm16,
                   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });

  if (Shift == LastShift)
    return;

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.

    Insn.push_back({ Opc, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }
}

/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
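///
/// A typical caller (sketch; the real driver lives in
/// AArch64ExpandPseudoInsts.cpp) consumes the result along the lines of:
///   SmallVector<ImmInsnModel, 4> Insn;
///   AArch64_IMM::expandMOVImm(Imm, 64, Insn);
///   for (const auto &I : Insn) {
///     // Emit I.Opcode with payload I.Op1 and shift/encoding I.Op2.
///   }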
void AArch64_IMM::expandMOVImm(uint64_t Imm, unsigned BitSize,
                               SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Scan the immediate and count the number of 16-bit chunks which are either
  // all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // Prefer MOVZ/MOVN over ORR because of the rules for the "mov" alias.
  if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  // Try a single ORR.
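  // For example, 0x00ff00ff00ff00ff is itself a valid logical immediate and
  // becomes the single instruction ORR Xd, XZR, #0x00ff00ff00ff00ff.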
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    Insn.push_back({ Opc, 0, Encoding });
    return;
  }

  // One to three instruction sequences.
  //
  // Prefer MOVZ/MOVN followed by MOVK; it's more readable, and possibly the
  // fastest sequence with fast literal generation.
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  assert(BitSize == 64 && "All 32-bit immediates can be expanded with a "
                          "MOVZ/MOVK pair");

  // Try other two-instruction sequences.

  // 64-bit ORR followed by MOVK.
  // We try to construct the ORR immediate in three different ways: either we
  // zero out the chunk which will be replaced, we fill the chunk which will
  // be replaced with ones, or we take the bit pattern from the other half of
  // the 64-bit immediate. This is comprehensive because of the way ORR
  // immediates are constructed.
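  //
  // For example (hand-worked): for 0x00ff00ff00ab00ff, borrowing the 0x00ff
  // pattern from the other 32-bit half yields 0x00ff00ff00ff00ff, a valid
  // logical immediate, so the expansion is
  //   ORR  Xd, XZR, #0x00ff00ff00ff00ff
  //   MOVK Xd, #0x00ab, LSL #16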
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    if (AArch64_AM::processLogicalImmediate(ZeroChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(OneChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(ReplicateChunk, BitSize,
                                            Encoding)) {
      // Create the ORR-immediate instruction.
      Insn.push_back({ AArch64::ORRXri, 0, Encoding });

      // Create the MOVK instruction.
      const unsigned Imm16 = getChunk(UImm, Shift / 16);
      Insn.push_back({ AArch64::MOVKXi, Imm16,
                       AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
      return;
    }
  }

  // FIXME: Add more two-instruction sequences.

  // Three instruction sequences.
  //
  // Prefer MOVZ/MOVN followed by two MOVK; it's more readable, and possibly
  // the fastest sequence with fast literal generation. (If neither MOVK is
  // part of a fast literal generation pair, it could be slower than the
  // four-instruction sequence, but we won't worry about that for now.)
  if (OneChunks || ZeroChunks) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  // Check for identical 16-bit chunks within the constant and if so materialize
  // them with a single ORR instruction. The remaining one or two 16-bit chunks
  // will be materialized with MOVK instructions.
  if (BitSize == 64 && tryToreplicateChunks(UImm, Insn))
    return;

  // Check whether the constant contains a sequence of contiguous ones, which
  // might be interrupted by one or two chunks. If so, materialize the sequence
  // of contiguous ones with an ORR instruction. Materialize the chunks which
  // are either interrupting the sequence or outside of the sequence with a
  // MOVK instruction.
  if (BitSize == 64 && trySequenceOfOnes(UImm, Insn))
    return;

  // We found no possible two or three instruction sequence; use the general
  // four-instruction sequence.
  expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
}