//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H

#include "AArch64ExpandImm.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <array>
#include <cassert>
#include <cstdint>
#include <limits>
#include <type_traits>

namespace llvm {

/// AArch64_AM - AArch64 Addressing Mode Stuff
namespace AArch64_AM {

//===----------------------------------------------------------------------===//
// Shifts
//

enum ShiftExtendType {
  InvalidShiftExtend = -1,
  LSL = 0,
  LSR,
  ASR,
  ROR,
  MSL,

  UXTB,
  UXTH,
  UXTW,
  UXTX,

  SXTB,
  SXTH,
  SXTW,
  SXTX,
};

/// getShiftExtendName - Get the string encoding for the shift type.
static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
  switch (ST) {
  default: llvm_unreachable("unhandled shift type!");
  case AArch64_AM::LSL: return "lsl";
  case AArch64_AM::LSR: return "lsr";
  case AArch64_AM::ASR: return "asr";
  case AArch64_AM::ROR: return "ror";
  case AArch64_AM::MSL: return "msl";
  case AArch64_AM::UXTB: return "uxtb";
  case AArch64_AM::UXTH: return "uxth";
  case AArch64_AM::UXTW: return "uxtw";
  case AArch64_AM::UXTX: return "uxtx";
  case AArch64_AM::SXTB: return "sxtb";
  case AArch64_AM::SXTH: return "sxth";
  case AArch64_AM::SXTW: return "sxtw";
  case AArch64_AM::SXTX: return "sxtx";
  }
  return nullptr;
}

/// getShiftType - Extract the shift type.
static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
  switch ((Imm >> 6) & 0x7) {
  default: return AArch64_AM::InvalidShiftExtend;
  case 0: return AArch64_AM::LSL;
  case 1: return AArch64_AM::LSR;
  case 2: return AArch64_AM::ASR;
  case 3: return AArch64_AM::ROR;
  case 4: return AArch64_AM::MSL;
  }
}

/// getShiftValue - Extract the shift value.
static inline unsigned getShiftValue(unsigned Imm) {
  return Imm & 0x3f;
}

/// getShifterImm - Encode the shift type and amount:
///   imm:     6-bit shift amount
///   shifter: 000 ==> lsl
///            001 ==> lsr
///            010 ==> asr
///            011 ==> ror
///            100 ==> msl
///   {8-6}  = shifter
///   {5-0}  = imm
static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
                                     unsigned Imm) {
  assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
  unsigned STEnc = 0;
  switch (ST) {
  default: llvm_unreachable("Invalid shift requested");
  case AArch64_AM::LSL: STEnc = 0; break;
  case AArch64_AM::LSR: STEnc = 1; break;
  case AArch64_AM::ASR: STEnc = 2; break;
  case AArch64_AM::ROR: STEnc = 3; break;
  case AArch64_AM::MSL: STEnc = 4; break;
  }
  return (STEnc << 6) | (Imm & 0x3f);
}
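
// Worked example (for illustration only): "lsl #12" encodes as
// getShifterImm(AArch64_AM::LSL, 12) == 0x0c, and "asr #3" encodes as
// getShifterImm(AArch64_AM::ASR, 3) == (2 << 6) | 3 == 0x83; getShiftType()
// and getShiftValue() recover the two fields from such an encoding.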

//===----------------------------------------------------------------------===//
// Extends
//

/// getArithShiftValue - get the arithmetic shift value.
static inline unsigned getArithShiftValue(unsigned Imm) {
  return Imm & 0x7;
}

/// getExtendType - Extract the extend type for operands of arithmetic ops.
static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
  assert((Imm & 0x7) == Imm && "invalid immediate!");
  switch (Imm) {
  default: llvm_unreachable("Compiler bug!");
  case 0: return AArch64_AM::UXTB;
  case 1: return AArch64_AM::UXTH;
  case 2: return AArch64_AM::UXTW;
  case 3: return AArch64_AM::UXTX;
  case 4: return AArch64_AM::SXTB;
  case 5: return AArch64_AM::SXTH;
  case 6: return AArch64_AM::SXTW;
  case 7: return AArch64_AM::SXTX;
  }
}

static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
  return getExtendType((Imm >> 3) & 0x7);
}

/// Mapping from extend bits to required operation:
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
  switch (ET) {
  default: llvm_unreachable("Invalid extend type requested");
  case AArch64_AM::UXTB: return 0; break;
  case AArch64_AM::UXTH: return 1; break;
  case AArch64_AM::UXTW: return 2; break;
  case AArch64_AM::UXTX: return 3; break;
  case AArch64_AM::SXTB: return 4; break;
  case AArch64_AM::SXTH: return 5; break;
  case AArch64_AM::SXTW: return 6; break;
  case AArch64_AM::SXTX: return 7; break;
  }
}

/// getArithExtendImm - Encode the extend type and shift amount for an
///                     arithmetic instruction:
///   imm:     3-bit extend amount
///   {5-3}  = shifter
///   {2-0}  = imm3
static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
                                         unsigned Imm) {
  assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
  return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
}
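
// Worked example (for illustration only): the operand "uxtw #2" on an
// arithmetic instruction encodes as
// getArithExtendImm(AArch64_AM::UXTW, 2) == (2 << 3) | 2 == 0x12;
// getArithExtendType(0x12) and getArithShiftValue(0x12) recover the fields.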

/// getMemDoShift - Extract the "do shift" flag value for load/store
/// instructions.
static inline bool getMemDoShift(unsigned Imm) {
  return (Imm & 0x1) != 0;
}

/// getMemExtendType - Extract the extend type for the offset operand of
/// loads/stores.
static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
  return getExtendType((Imm >> 1) & 0x7);
}

/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
///   doshift:     should the offset be scaled by the access size
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
///   {3-1}  = shifter
///   {0}    = doshift
static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
                                       bool DoShift) {
  return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
}
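
// Worked example (for illustration only): a scaled "sxtw" offset encodes as
// getMemExtendImm(AArch64_AM::SXTW, true) == (6 << 1) | 1 == 0xd;
// getMemExtendType(0xd) == AArch64_AM::SXTW and getMemDoShift(0xd) == true.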

static inline uint64_t ror(uint64_t elt, unsigned size) {
  return ((elt & 1) << (size-1)) | (elt >> 1);
}

/// processLogicalImmediate - Determine if an immediate value can be encoded
/// as the immediate operand of a logical instruction for the given register
/// size. If so, return true with "encoding" set to the encoded value in
/// the form N:immr:imms.
static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
                                           uint64_t &Encoding) {
  if (Imm == 0ULL || Imm == ~0ULL ||
      (RegSize != 64 &&
       (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize)))))
    return false;

  // First, determine the element size.
  unsigned Size = RegSize;

  do {
    Size /= 2;
    uint64_t Mask = (1ULL << Size) - 1;

    if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
      Size *= 2;
      break;
    }
  } while (Size > 2);

  // Second, determine the rotation to make the element be: 0^m 1^n.
  uint32_t CTO, I;
  uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size);
  Imm &= Mask;

  if (isShiftedMask_64(Imm)) {
    I = countTrailingZeros(Imm);
    assert(I < 64 && "undefined behavior");
    CTO = countTrailingOnes(Imm >> I);
  } else {
    Imm |= ~Mask;
    if (!isShiftedMask_64(~Imm))
      return false;

    unsigned CLO = countLeadingOnes(Imm);
    I = 64 - CLO;
    CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
  }

  // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
  // to our target value, where I is the number of RORs to go the opposite
  // direction.
  assert(Size > I && "I should be smaller than element size");
  unsigned Immr = (Size - I) & (Size - 1);

  // If size has a 1 in the n'th bit, create a value that has zeroes in
  // bits [0, n] and ones above that.
  uint64_t NImms = ~(Size-1) << 1;

  // Or the CTO value into the low bits, which must be below the Nth bit
  // mentioned above.
  NImms |= (CTO-1);

  // Extract the seventh bit and toggle it to create the N field.
  unsigned N = ((NImms >> 6) & 1) ^ 1;

  Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);
  return true;
}

/// isLogicalImmediate - Return true if the immediate is valid for a logical
/// immediate instruction of the given register size. Return false otherwise.
static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding;
  return processLogicalImmediate(imm, regSize, encoding);
}

/// encodeLogicalImmediate - Return the encoded immediate value for a logical
/// immediate instruction of the given register size.
static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding = 0;
  bool res = processLogicalImmediate(imm, regSize, encoding);
  assert(res && "invalid logical immediate");
  (void)res;
  return encoding;
}

/// decodeLogicalImmediate - Decode a logical immediate value in the form
/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
/// integer value it represents with regSize bits.
static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
  // Extract the N, imms, and immr fields.
  unsigned N = (val >> 12) & 1;
  unsigned immr = (val >> 6) & 0x3f;
  unsigned imms = val & 0x3f;

  assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  assert(len >= 0 && "undefined logical immediate encoding");
  unsigned size = (1 << len);
  unsigned R = immr & (size - 1);
  unsigned S = imms & (size - 1);
  assert(S != size - 1 && "undefined logical immediate encoding");
  uint64_t pattern = (1ULL << (S + 1)) - 1;
  for (unsigned i = 0; i < R; ++i)
    pattern = ror(pattern, size);

  // Replicate the pattern to fill the regSize.
  while (size != regSize) {
    pattern |= (pattern << size);
    size *= 2;
  }
  return pattern;
}
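
// Worked round trip (for illustration only): for a bitmask immediate such as
// 0x0000ffff0000ffffULL, isLogicalImmediate(Imm, 64) is true, and
// decodeLogicalImmediate(encodeLogicalImmediate(Imm, 64), 64) returns the
// original value.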

/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
/// is a valid encoding for an integer value with regSize bits.
static inline bool isValidDecodeLogicalImmediate(uint64_t val,
                                                 unsigned regSize) {
  // Extract the N and imms fields needed for checking.
  unsigned N = (val >> 12) & 1;
  unsigned imms = val & 0x3f;

  if (regSize == 32 && N != 0) // undefined logical immediate encoding
    return false;
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  if (len < 0) // undefined logical immediate encoding
    return false;
  unsigned size = (1 << len);
  unsigned S = imms & (size - 1);
  if (S == size - 1) // undefined logical immediate encoding
    return false;

  return true;
}

//===----------------------------------------------------------------------===//
// Floating-point Immediates
//
static inline float getFPImmFloat(unsigned Imm) {
  // We expect an 8-bit binary encoding of a floating-point number here.

  uint8_t Sign = (Imm >> 7) & 0x1;
  uint8_t Exp = (Imm >> 4) & 0x7;
  uint8_t Mantissa = Imm & 0xf;

  //   8-bit FP IEEE Float Encoding
  //   abcd efgh aBbbbbbc defgh000 00000000 00000000
  //
  // where B = NOT(b);

  uint32_t I = 0;
  I |= Sign << 31;
  I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
  I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
  I |= (Exp & 0x3) << 23;
  I |= Mantissa << 19;
  return bit_cast<float>(I);
}
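
// Worked example (for illustration only): the 8-bit encoding 0x70
// (a=0, bcd=111, efgh=0000) expands to the IEEE-754 bit pattern 0x3f800000,
// so getFPImmFloat(0x70) == 1.0f.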

/// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP16Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15;  // -14 to 15
  int32_t Mantissa = Imm.getZExtValue() & 0x3ff;  // 10 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x3f)
    return -1;
  Mantissa >>= 6;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP16Imm(const APFloat &FPImm) {
  return getFP16Imm(FPImm.bitcastToAPInt());
}

/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP32Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP32Imm(const APFloat &FPImm) {
  return getFP32Imm(FPImm.bitcastToAPInt());
}
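
// Worked example (for illustration only): 1.0f has the bit pattern 0x3f800000
// (Sign=0, Exp=0, Mantissa=0), which is representable, so
// getFP32Imm(APFloat(1.0f)) == 0x70; a value such as 0.1f has mantissa bits
// below bit 19 set and yields -1.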

/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP64Imm(const APInt &Imm) {
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffULL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP64Imm(const APFloat &FPImm) {
  return getFP64Imm(FPImm.bitcastToAPInt());
}

//===--------------------------------------------------------------------===//
// AdvSIMD Modified Immediates
//===--------------------------------------------------------------------===//

// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffffff00ffffff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 32) | EncVal;
}
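
// Worked round trip (for illustration only):
// decodeAdvSIMDModImmType1(0xab) == 0x000000ab000000abULL, which satisfies
// isAdvSIMDModImmType1 and encodes back to 0xab via encodeAdvSIMDModImmType1.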

// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8);
}

// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
  return (Imm & 0xff0000ULL) >> 16;
}

static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16);
}

// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0x00ffffff00ffffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
  return (Imm & 0xff000000ULL) >> 24;
}

static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 24);
}

// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
         ((Imm & 0xff00ff00ff00ff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
}

// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
         ((Imm & 0x00ff00ff00ff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
}

// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
}

// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
}

static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
}

static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
  return (Imm & 0x00ff0000ULL) >> 16;
}

// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
         ((Imm >> 56) == (Imm & 0x000000ffULL));
}

static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
  uint64_t EncVal = Imm;
  EncVal |= (EncVal << 8);
  EncVal |= (EncVal << 16);
  EncVal |= (EncVal << 32);
  return EncVal;
}
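
// Worked example (for illustration only): decodeAdvSIMDModImmType9(0x5a)
// replicates the byte into every lane, producing 0x5a5a5a5a5a5a5a5aULL.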

// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
  uint64_t ByteA = Imm & 0xff00000000000000ULL;
  uint64_t ByteB = Imm & 0x00ff000000000000ULL;
  uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
  uint64_t ByteD = Imm & 0x000000ff00000000ULL;
  uint64_t ByteE = Imm & 0x00000000ff000000ULL;
  uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
  uint64_t ByteG = Imm & 0x000000000000ff00ULL;
  uint64_t ByteH = Imm & 0x00000000000000ffULL;

  return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
         (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
         (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
         (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
         (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
         (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
         (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
         (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
  uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
  uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
  uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
  uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
  if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
  if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
  if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
  if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
  if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
  return EncVal;
}

// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (BString == 0x1f || BString == 0x20) &&
         ((Imm & 0x0007ffff0007ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x80000000ULL) != 0;
  uint8_t BitB = (Imm & 0x20000000ULL) != 0;
  uint8_t BitC = (Imm & 0x01000000ULL) != 0;
  uint8_t BitD = (Imm & 0x00800000ULL) != 0;
  uint8_t BitE = (Imm & 0x00400000ULL) != 0;
  uint8_t BitF = (Imm & 0x00200000ULL) != 0;
  uint8_t BitG = (Imm & 0x00100000ULL) != 0;
  uint8_t BitH = (Imm & 0x00080000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x80000000ULL;
  if (Imm & 0x40) EncVal |= 0x3e000000ULL;
  else            EncVal |= 0x40000000ULL;
  if (Imm & 0x20) EncVal |= 0x01000000ULL;
  if (Imm & 0x10) EncVal |= 0x00800000ULL;
  if (Imm & 0x08) EncVal |= 0x00400000ULL;
  if (Imm & 0x04) EncVal |= 0x00200000ULL;
  if (Imm & 0x02) EncVal |= 0x00100000ULL;
  if (Imm & 0x01) EncVal |= 0x00080000ULL;
  return (EncVal << 32) | EncVal;
}
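
// Worked example (for illustration only): the 8-bit FMOV encoding 0x70 (1.0f)
// expands as decodeAdvSIMDModImmType11(0x70) == 0x3f8000003f800000ULL, i.e.
// 1.0f replicated into both 32-bit lanes.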

// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
  return ((BString == 0xff || BString == 0x100) &&
         ((Imm & 0x0000ffffffffffffULL) == 0));
}

static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
  uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
  uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
  uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
  else            EncVal |= 0x4000000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
  if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
  if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
  if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
  if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
  if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
  return (EncVal << 32) | EncVal;
}

/// Returns true if Imm is the concatenation of a repeating pattern of type T.
template <typename T>
static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
  auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm);
  return llvm::all_equal(Parts);
}

/// Returns true if Imm is valid for CPY/DUP.
template <typename T>
static inline bool isSVECpyImm(int64_t Imm) {
  // Imm is interpreted as a signed value, which means top bits must be all ones
  // (sign bits if the immediate value is negative and passed in a larger
  // container), or all zeroes.
  int64_t Mask = ~int64_t(std::numeric_limits<std::make_unsigned_t<T>>::max());
  if ((Imm & Mask) != 0 && (Imm & Mask) != Mask)
    return false;

  // Imm is a signed 8-bit value.
  // Top bits must be zeroes or sign bits.
  if (Imm & 0xff)
    return int8_t(Imm) == T(Imm);

  // Imm is a signed 16-bit value and multiple of 256.
  // Top bits must be zeroes or sign bits.
  if (Imm & 0xff00)
    return int16_t(Imm) == T(Imm);

  return Imm == 0;
}
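
// Worked examples (for illustration only): isSVECpyImm<int8_t>(-1) and
// isSVECpyImm<int16_t>(0x100) are true, while isSVECpyImm<int16_t>(0x101) is
// false because it fits neither the signed 8-bit form nor the 16-bit
// multiple-of-256 form.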

/// Returns true if Imm is valid for ADD/SUB.
template <typename T>
static inline bool isSVEAddSubImm(int64_t Imm) {
  bool IsInt8t = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                 std::is_same<int8_t, T>::value;
  return uint8_t(Imm) == Imm || (!IsInt8t && uint16_t(Imm & ~0xff) == Imm);
}
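
// Worked examples (for illustration only): isSVEAddSubImm<int8_t>(200) is true
// (unsigned 8-bit range), isSVEAddSubImm<int16_t>(0x4500) is true (a multiple
// of 256 that fits in 16 bits), but isSVEAddSubImm<int16_t>(0x4501) is false.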

/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) {
  if (isSVECpyImm<int64_t>(Imm))
    return false;

  auto S = bit_cast<std::array<int32_t, 2>>(Imm);
  auto H = bit_cast<std::array<int16_t, 4>>(Imm);
  auto B = bit_cast<std::array<int8_t, 8>>(Imm);

  if (isSVEMaskOfIdenticalElements<int32_t>(Imm) && isSVECpyImm<int32_t>(S[0]))
    return false;
  if (isSVEMaskOfIdenticalElements<int16_t>(Imm) && isSVECpyImm<int16_t>(H[0]))
    return false;
  if (isSVEMaskOfIdenticalElements<int8_t>(Imm) && isSVECpyImm<int8_t>(B[0]))
    return false;
  return isLogicalImmediate(Imm, 64);
}

inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
    if ((Value & ~(0xffffULL << Shift)) == 0)
      return true;

  return false;
}

inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0 && Shift != 0)
    return false;

  return (Value & ~(0xffffULL << Shift)) == 0;
}
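
// Worked example (for illustration only): "mov w0, #0x12340000" is an alias of
// "movz w0, #0x1234, lsl #16", and isMOVZMovAlias(0x12340000, 16, 32) returns
// true.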

inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
  // MOVZ takes precedence over MOVN.
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return false;

  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isMOVZMovAlias(Value, Shift, RegWidth);
}
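
// Worked example (for illustration only): "mov x0, #-2" is an alias of
// "movn x0, #1", so isMOVNMovAlias(0xfffffffffffffffeULL, 0, 64) returns true.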

inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return true;

  // It's not a MOVZ, but it might be a MOVN.
  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isAnyMOVZMovAlias(Value, RegWidth);
}

} // end namespace AArch64_AM

} // end namespace llvm

#endif