1 //===-- ARMAddressingModes.h - ARM Addressing Modes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the ARM addressing mode implementation stuff.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_LIB_TARGET_ARM_MCTARGETDESC_ARMADDRESSINGMODES_H
14 #define LLVM_LIB_TARGET_ARM_MCTARGETDESC_ARMADDRESSINGMODES_H
15 
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/bit.h"
19 #include "llvm/Support/ErrorHandling.h"
20 #include "llvm/Support/MathExtras.h"
21 #include <cassert>
22 
23 namespace llvm {
24 
25 /// ARM_AM - ARM Addressing Mode Stuff
26 namespace ARM_AM {
  /// Shift opcodes usable in shifted-operand and addressing-mode encodings.
  /// no_shift means the operand is used unshifted.
  enum ShiftOpc {
    no_shift = 0,
    asr,   // arithmetic shift right
    lsl,   // logical shift left
    lsr,   // logical shift right
    ror,   // rotate right
    rrx,   // rotate right with extend
    uxtw   // unsigned extend word (used by some Thumb-2/MVE forms)
  };
36 
37   enum AddrOpc {
38     sub = 0,
39     add
40   };
41 
42   inline const char *getAddrOpcStr(AddrOpc Op) { return Op == sub ? "-" : ""; }
43 
44   inline const char *getShiftOpcStr(ShiftOpc Op) {
45     switch (Op) {
46     default: llvm_unreachable("Unknown shift opc!");
47     case ARM_AM::asr: return "asr";
48     case ARM_AM::lsl: return "lsl";
49     case ARM_AM::lsr: return "lsr";
50     case ARM_AM::ror: return "ror";
51     case ARM_AM::rrx: return "rrx";
52     case ARM_AM::uxtw: return "uxtw";
53     }
54   }
55 
56   inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
57     switch (Op) {
58     default: llvm_unreachable("Unknown shift opc!");
59     case ARM_AM::asr: return 2;
60     case ARM_AM::lsl: return 0;
61     case ARM_AM::lsr: return 1;
62     case ARM_AM::ror: return 3;
63     }
64   }
65 
  /// Sub-modes for load/store-multiple addressing (addrmode4).
  enum AMSubMode {
    bad_am_submode = 0,
    ia,   // increment after
    ib,   // increment before
    da,   // decrement after
    db    // decrement before
  };
73 
74   inline const char *getAMSubModeStr(AMSubMode Mode) {
75     switch (Mode) {
76     default: llvm_unreachable("Unknown addressing sub-mode!");
77     case ARM_AM::ia: return "ia";
78     case ARM_AM::ib: return "ib";
79     case ARM_AM::da: return "da";
80     case ARM_AM::db: return "db";
81     }
82   }
83 
84   /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
85   ///
86   inline unsigned rotr32(unsigned Val, unsigned Amt) {
87     assert(Amt < 32 && "Invalid rotate amount");
88     return (Val >> Amt) | (Val << ((32-Amt)&31));
89   }
90 
91   /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
92   ///
93   inline unsigned rotl32(unsigned Val, unsigned Amt) {
94     assert(Amt < 32 && "Invalid rotate amount");
95     return (Val << Amt) | (Val >> ((32-Amt)&31));
96   }
97 
98   //===--------------------------------------------------------------------===//
99   // Addressing Mode #1: shift_operand with registers
100   //===--------------------------------------------------------------------===//
101   //
102   // This 'addressing mode' is used for arithmetic instructions.  It can
103   // represent things like:
104   //   reg
105   //   reg [asr|lsl|lsr|ror|rrx] reg
106   //   reg [asr|lsl|lsr|ror|rrx] imm
107   //
  // This is stored as three operands [rega, regb, opc].  The first is the base
109   // reg, the second is the shift amount (or reg0 if not present or imm).  The
110   // third operand encodes the shift opcode and the imm if a reg isn't present.
111   //
112   inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
113     return ShOp | (Imm << 3);
114   }
115   inline unsigned getSORegOffset(unsigned Op) { return Op >> 3; }
116   inline ShiftOpc getSORegShOp(unsigned Op) { return (ShiftOpc)(Op & 7); }
117 
118   /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
119   /// the 8-bit imm value.
120   inline unsigned getSOImmValImm(unsigned Imm) { return Imm & 0xFF; }
121   /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
122   /// the rotate amount.
123   inline unsigned getSOImmValRot(unsigned Imm) { return (Imm >> 8) * 2; }
124 
  /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
  /// computing the rotate amount to use.  If this immediate value cannot be
  /// handled with a single shifter-op, determine a good rotate amount that will
  /// take a maximal chunk of bits out of the immediate.
  inline unsigned getSOImmValRotate(unsigned Imm) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the rotate amount.
    unsigned TZ = countTrailingZeros(Imm);

    // Rotate amount must be even.  Something like 0x200 must be rotated 8 bits,
    // not 9.
    unsigned RotAmt = TZ & ~1;

    // If we can handle this spread, return it.
    if ((rotr32(Imm, RotAmt) & ~255U) == 0)
      return (32-RotAmt)&31;  // HW rotates right, not left.

    // For values like 0xF000000F the set bits form a chunk that wraps around
    // the top of the word: ignore the low 6 bits, then retry the hunt above
    // them.
    if (Imm & 63U) {
      unsigned TZ2 = countTrailingZeros(Imm & ~63U);
      unsigned RotAmt2 = TZ2 & ~1;
      if ((rotr32(Imm, RotAmt2) & ~255U) == 0)
        return (32-RotAmt2)&31;  // HW rotates right, not left.
    }

    // Otherwise, we have no way to cover this span of bits with a single
    // shifter_op immediate.  Return a chunk of bits that will be useful to
    // handle.
    return (32-RotAmt)&31;  // HW rotates right, not left.
  }
159 
  /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into an shifter_operand immediate operand, return the 12-bit encoding for
  /// it (8-bit payload in bits 0-7, rotate amount divided by two in bits
  /// 8-11).  If not, return -1.
  inline int getSOImmVal(unsigned Arg) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Arg & ~255U) == 0) return Arg;

    unsigned RotAmt = getSOImmValRotate(Arg);

    // If this cannot be handled with a single shifter_op, bail out.
    if (rotr32(~255U, RotAmt) & Arg)
      return -1;

    // Encode this correctly.  Rotating left by RotAmt undoes the hardware's
    // rotate-right, leaving the 8-bit payload in the low byte.
    return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
  }
177 
178   /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
179   /// or'ing together two SOImmVal's.
180   inline bool isSOImmTwoPartVal(unsigned V) {
181     // If this can be handled with a single shifter_op, bail out.
182     V = rotr32(~255U, getSOImmValRotate(V)) & V;
183     if (V == 0)
184       return false;
185 
186     // If this can be handled with two shifter_op's, accept.
187     V = rotr32(~255U, getSOImmValRotate(V)) & V;
188     return V == 0;
189   }
190 
191   /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
192   /// return the first chunk of it.
193   inline unsigned getSOImmTwoPartFirst(unsigned V) {
194     return rotr32(255U, getSOImmValRotate(V)) & V;
195   }
196 
197   /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
198   /// return the second chunk of it.
199   inline unsigned getSOImmTwoPartSecond(unsigned V) {
200     // Mask out the first hunk.
201     V = rotr32(~255U, getSOImmValRotate(V)) & V;
202 
203     // Take what's left.
204     assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
205     return V;
206   }
207 
208   /// isSOImmTwoPartValNeg - Return true if the specified value can be obtained
209   /// by two SOImmVal, that -V = First + Second.
210   /// "R+V" can be optimized to (sub (sub R, First), Second).
211   /// "R=V" can be optimized to (sub (mvn R, ~(-First)), Second).
212   inline bool isSOImmTwoPartValNeg(unsigned V) {
213     unsigned First;
214     if (!isSOImmTwoPartVal(-V))
215       return false;
216     // Return false if ~(-First) is not a SoImmval.
217     First = getSOImmTwoPartFirst(-V);
218     First = ~(-First);
219     return !(rotr32(~255U, getSOImmValRotate(First)) & First);
220   }
221 
222   /// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed
223   /// by a left shift. Returns the shift amount to use.
224   inline unsigned getThumbImmValShift(unsigned Imm) {
225     // 8-bit (or less) immediates are trivially immediate operand with a shift
226     // of zero.
227     if ((Imm & ~255U) == 0) return 0;
228 
229     // Use CTZ to compute the shift amount.
230     return countTrailingZeros(Imm);
231   }
232 
233   /// isThumbImmShiftedVal - Return true if the specified value can be obtained
234   /// by left shifting a 8-bit immediate.
235   inline bool isThumbImmShiftedVal(unsigned V) {
236     // If this can be handled with
237     V = (~255U << getThumbImmValShift(V)) & V;
238     return V == 0;
239   }
240 
241   /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed
242   /// by a left shift. Returns the shift amount to use.
243   inline unsigned getThumbImm16ValShift(unsigned Imm) {
244     // 16-bit (or less) immediates are trivially immediate operand with a shift
245     // of zero.
246     if ((Imm & ~65535U) == 0) return 0;
247 
248     // Use CTZ to compute the shift amount.
249     return countTrailingZeros(Imm);
250   }
251 
252   /// isThumbImm16ShiftedVal - Return true if the specified value can be
253   /// obtained by left shifting a 16-bit immediate.
254   inline bool isThumbImm16ShiftedVal(unsigned V) {
255     // If this can be handled with
256     V = (~65535U << getThumbImm16ValShift(V)) & V;
257     return V == 0;
258   }
259 
260   /// getThumbImmNonShiftedVal - If V is a value that satisfies
261   /// isThumbImmShiftedVal, return the non-shiftd value.
262   inline unsigned getThumbImmNonShiftedVal(unsigned V) {
263     return V >> getThumbImmValShift(V);
264   }
265 
266 
  /// getT2SOImmValSplat - Return the 12-bit encoded representation
  /// if the specified value can be obtained by splatting the low 8 bits
  /// into every other byte or every byte of a 32-bit value. i.e.,
  ///     00000000 00000000 00000000 abcdefgh    control = 0
  ///     00000000 abcdefgh 00000000 abcdefgh    control = 1
  ///     abcdefgh 00000000 abcdefgh 00000000    control = 2
  ///     abcdefgh abcdefgh abcdefgh abcdefgh    control = 3
  /// Return -1 if none of the above apply.
  /// See ARM Reference Manual A6.3.2.
  inline int getT2SOImmValSplatVal(unsigned V) {
    unsigned u, Vs, Imm;
    // control = 0: the value already fits in the low byte.
    if ((V & 0xffffff00) == 0)
      return V;

    // If the value is zeroes in the first byte, just shift those off: this
    // maps the control = 2 pattern onto control = 1 for the checks below.
    Vs = ((V & 0xff) == 0) ? V >> 8 : V;
    // Any passing value only has 8 bits of payload, splatted across the word
    Imm = Vs & 0xff;
    // Likewise, any passing values have the payload splatted into the 3rd byte
    u = Imm | (Imm << 16);

    // control = 1 or 2: payload in bytes 0 and 2 (2 if we shifted above).
    if (Vs == u)
      return (((Vs == V) ? 1 : 2) << 8) | Imm;

    // control = 3: payload in all four bytes.
    if (Vs == (u | (u << 8)))
      return (3 << 8) | Imm;

    return -1;
  }
299 
  /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
  /// specified value is a rotated 8-bit value. Return -1 if no rotation
  /// encoding is possible.
  /// See ARM Reference Manual A6.3.2.
  inline int getT2SOImmValRotateVal(unsigned V) {
    unsigned RotAmt = countLeadingZeros(V);
    // With 24+ leading zeros the value fits in 8 bits unrotated; that form is
    // covered by the splat encoding, not this one.
    if (RotAmt >= 24)
      return -1;

    // If 'Arg' can be handled with a single shifter_op return the value:
    // all set bits must lie within the 8-bit window starting at the leading
    // one.
    if ((rotr32(0xff000000U, RotAmt) & V) == V)
      // Bits 0-6 hold the payload minus its top bit (which is always set,
      // since RotAmt is the leading-zero count); bits 7-11 hold the rotation.
      return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);

    return -1;
  }
315 
316   /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
317   /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
318   /// encoding for it.  If not, return -1.
319   /// See ARM Reference Manual A6.3.2.
320   inline int getT2SOImmVal(unsigned Arg) {
321     // If 'Arg' is an 8-bit splat, then get the encoded value.
322     int Splat = getT2SOImmValSplatVal(Arg);
323     if (Splat != -1)
324       return Splat;
325 
326     // If 'Arg' can be handled with a single shifter_op return the value.
327     int Rot = getT2SOImmValRotateVal(Arg);
328     if (Rot != -1)
329       return Rot;
330 
331     return -1;
332   }
333 
334   inline unsigned getT2SOImmValRotate(unsigned V) {
335     if ((V & ~255U) == 0) return 0;
336     // Use CTZ to compute the rotate amount.
337     unsigned RotAmt = countTrailingZeros(V);
338     return (32 - RotAmt) & 31;
339   }
340 
  /// isT2SOImmTwoPartVal - Return true if Imm can be built by or'ing together
  /// two Thumb-2 modified immediates (and cannot be a single splat).
  inline bool isT2SOImmTwoPartVal(unsigned Imm) {
    unsigned V = Imm;
    // Passing values can be any combination of splat values and shifter
    // values. If this can be handled with a single shifter or splat, bail
    // out. Those should be handled directly, not with a two-part val.
    if (getT2SOImmValSplatVal(V) != -1)
      return false;
    V = rotr32 (~255U, getT2SOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Likewise, try masking out a splat value first.
    V = Imm;
    if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
      V &= ~0xff00ff00U;
    else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
      V &= ~0x00ff00ffU;
    // If what's left can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Otherwise, do not accept.
    return false;
  }
367 
368   inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
369     assert (isT2SOImmTwoPartVal(Imm) &&
370             "Immedate cannot be encoded as two part immediate!");
371     // Try a shifter operand as one part
372     unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm;
373     // If the rest is encodable as an immediate, then return it.
374     if (getT2SOImmVal(V) != -1) return V;
375 
376     // Try masking out a splat value first.
377     if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
378       return Imm & 0xff00ff00U;
379 
380     // The other splat is all that's left as an option.
381     assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
382     return Imm & 0x00ff00ffU;
383   }
384 
385   inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
386     // Mask out the first hunk
387     Imm ^= getT2SOImmTwoPartFirst(Imm);
388     // Return what's left
389     assert (getT2SOImmVal(Imm) != -1 &&
390             "Unable to encode second part of T2 two part SO immediate");
391     return Imm;
392   }
393 
394 
395   //===--------------------------------------------------------------------===//
396   // Addressing Mode #2
397   //===--------------------------------------------------------------------===//
398   //
399   // This is used for most simple load/store instructions.
400   //
401   // addrmode2 := reg +/- reg shop imm
402   // addrmode2 := reg +/- imm12
403   //
404   // The first operand is always a Reg.  The second operand is a reg if in
405   // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
406   // in bit 12, the immediate in bits 0-11, and the shift op in 13-15. The
407   // fourth operand 16-17 encodes the index mode.
408   //
409   // If this addressing mode is a frame index (before prolog/epilog insertion
410   // and code rewriting), this operand will have the form:  FI#, reg0, <offs>
411   // with no shift amount for the frame offset.
412   //
413   inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO,
414                             unsigned IdxMode = 0) {
415     assert(Imm12 < (1 << 12) && "Imm too large!");
416     bool isSub = Opc == sub;
417     return Imm12 | ((int)isSub << 12) | (SO << 13) | (IdxMode << 16) ;
418   }
419   inline unsigned getAM2Offset(unsigned AM2Opc) {
420     return AM2Opc & ((1 << 12)-1);
421   }
422   inline AddrOpc getAM2Op(unsigned AM2Opc) {
423     return ((AM2Opc >> 12) & 1) ? sub : add;
424   }
425   inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
426     return (ShiftOpc)((AM2Opc >> 13) & 7);
427   }
428   inline unsigned getAM2IdxMode(unsigned AM2Opc) { return (AM2Opc >> 16); }
429 
430   //===--------------------------------------------------------------------===//
431   // Addressing Mode #3
432   //===--------------------------------------------------------------------===//
433   //
434   // This is used for sign-extending loads, and load/store-pair instructions.
435   //
436   // addrmode3 := reg +/- reg
437   // addrmode3 := reg +/- imm8
438   //
439   // The first operand is always a Reg.  The second operand is a reg if in
440   // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
441   // in bit 8, the immediate in bits 0-7. The fourth operand 9-10 encodes the
442   // index mode.
443 
444   /// getAM3Opc - This function encodes the addrmode3 opc field.
445   inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset,
446                             unsigned IdxMode = 0) {
447     bool isSub = Opc == sub;
448     return ((int)isSub << 8) | Offset | (IdxMode << 9);
449   }
450   inline unsigned char getAM3Offset(unsigned AM3Opc) { return AM3Opc & 0xFF; }
451   inline AddrOpc getAM3Op(unsigned AM3Opc) {
452     return ((AM3Opc >> 8) & 1) ? sub : add;
453   }
454   inline unsigned getAM3IdxMode(unsigned AM3Opc) { return (AM3Opc >> 9); }
455 
456   //===--------------------------------------------------------------------===//
457   // Addressing Mode #4
458   //===--------------------------------------------------------------------===//
459   //
460   // This is used for load / store multiple instructions.
461   //
462   // addrmode4 := reg, <mode>
463   //
464   // The four modes are:
465   //    IA - Increment after
466   //    IB - Increment before
467   //    DA - Decrement after
468   //    DB - Decrement before
469   // For VFP instructions, only the IA and DB modes are valid.
470 
471   inline AMSubMode getAM4SubMode(unsigned Mode) {
472     return (AMSubMode)(Mode & 0x7);
473   }
474 
475   inline unsigned getAM4ModeImm(AMSubMode SubMode) { return (int)SubMode; }
476 
477   //===--------------------------------------------------------------------===//
478   // Addressing Mode #5
479   //===--------------------------------------------------------------------===//
480   //
481   // This is used for coprocessor instructions, such as FP load/stores.
482   //
483   // addrmode5 := reg +/- imm8*4
484   //
485   // The first operand is always a Reg.  The second operand encodes the
486   // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
487 
488   /// getAM5Opc - This function encodes the addrmode5 opc field.
489   inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
490     bool isSub = Opc == sub;
491     return ((int)isSub << 8) | Offset;
492   }
493   inline unsigned char getAM5Offset(unsigned AM5Opc) { return AM5Opc & 0xFF; }
494   inline AddrOpc getAM5Op(unsigned AM5Opc) {
495     return ((AM5Opc >> 8) & 1) ? sub : add;
496   }
497 
498   //===--------------------------------------------------------------------===//
499   // Addressing Mode #5 FP16
500   //===--------------------------------------------------------------------===//
501   //
502   // This is used for coprocessor instructions, such as 16-bit FP load/stores.
503   //
504   // addrmode5fp16 := reg +/- imm8*2
505   //
506   // The first operand is always a Reg.  The second operand encodes the
507   // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
508 
509   /// getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
510   inline unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset) {
511     bool isSub = Opc == sub;
512     return ((int)isSub << 8) | Offset;
513   }
514   inline unsigned char getAM5FP16Offset(unsigned AM5Opc) {
515     return AM5Opc & 0xFF;
516   }
517   inline AddrOpc getAM5FP16Op(unsigned AM5Opc) {
518     return ((AM5Opc >> 8) & 1) ? sub : add;
519   }
520 
521   //===--------------------------------------------------------------------===//
522   // Addressing Mode #6
523   //===--------------------------------------------------------------------===//
524   //
525   // This is used for NEON load / store instructions.
526   //
527   // addrmode6 := reg with optional alignment
528   //
529   // This is stored in two operands [regaddr, align].  The first is the
530   // address register.  The second operand is the value of the alignment
531   // specifier in bytes or zero if no explicit alignment.
532   // Valid alignments depend on the specific instruction.
533 
534   //===--------------------------------------------------------------------===//
535   // NEON/MVE Modified Immediates
536   //===--------------------------------------------------------------------===//
537   //
538   // Several NEON and MVE instructions (e.g., VMOV) take a "modified immediate"
539   // vector operand, where a small immediate encoded in the instruction
540   // specifies a full NEON vector value.  These modified immediates are
541   // represented here as encoded integers.  The low 8 bits hold the immediate
542   // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold
543   // the "Cmode" field of the instruction.  The interfaces below treat the
544   // Op and Cmode values as a single 5-bit value.
545 
546   inline unsigned createVMOVModImm(unsigned OpCmode, unsigned Val) {
547     return (OpCmode << 8) | Val;
548   }
549   inline unsigned getVMOVModImmOpCmode(unsigned ModImm) {
550     return (ModImm >> 8) & 0x1f;
551   }
552   inline unsigned getVMOVModImmVal(unsigned ModImm) { return ModImm & 0xff; }
553 
  /// decodeVMOVModImm - Decode a NEON/MVE modified immediate value into the
  /// element value and the element size in bits.  (If the element size is
  /// smaller than the vector, it is splatted into all the elements.)
  /// \param ModImm  encoded immediate: Op/Cmode in bits 8-12, value in 0-7.
  /// \param EltBits [out] set to the decoded element size: 8, 16, 32 or 64.
  /// \return the decoded element value.
  inline uint64_t decodeVMOVModImm(unsigned ModImm, unsigned &EltBits) {
    unsigned OpCmode = getVMOVModImmOpCmode(ModImm);
    unsigned Imm8 = getVMOVModImmVal(ModImm);
    uint64_t Val = 0;

    if (OpCmode == 0xe) {
      // 8-bit vector elements
      Val = Imm8;
      EltBits = 8;
    } else if ((OpCmode & 0xc) == 0x8) {
      // 16-bit vector elements; OpCmode bits 2-1 select the byte holding Imm8
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 16;
    } else if ((OpCmode & 0x8) == 0) {
      // 32-bit vector elements, zero with one byte set
      unsigned ByteNum = (OpCmode & 0x6) >> 1;
      Val = Imm8 << (8 * ByteNum);
      EltBits = 32;
    } else if ((OpCmode & 0xe) == 0xc) {
      // 32-bit vector elements, one byte with all lower bits set to one
      unsigned ByteNum = 1 + (OpCmode & 0x1);
      Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
      EltBits = 32;
    } else if (OpCmode == 0x1e) {
      // 64-bit vector elements: each bit of Imm8 expands to a full 0xff byte
      for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
        if ((ModImm >> ByteNum) & 1)
          Val |= (uint64_t)0xff << (8 * ByteNum);
      }
      EltBits = 64;
    } else {
      llvm_unreachable("Unsupported VMOV immediate");
    }
    return Val;
  }
593 
594   // Generic validation for single-byte immediate (0X00, 00X0, etc).
595   inline bool isNEONBytesplat(unsigned Value, unsigned Size) {
596     assert(Size >= 1 && Size <= 4 && "Invalid size");
597     unsigned count = 0;
598     for (unsigned i = 0; i < Size; ++i) {
599       if (Value & 0xff) count++;
600       Value >>= 8;
601     }
602     return count == 1;
603   }
604 
605   /// Checks if Value is a correct immediate for instructions like VBIC/VORR.
606   inline bool isNEONi16splat(unsigned Value) {
607     if (Value > 0xffff)
608       return false;
609     // i16 value with set bits only in one byte X0 or 0X.
610     return Value == 0 || isNEONBytesplat(Value, 2);
611   }
612 
613   // Encode NEON 16 bits Splat immediate for instructions like VBIC/VORR
614   inline unsigned encodeNEONi16splat(unsigned Value) {
615     assert(isNEONi16splat(Value) && "Invalid NEON splat value");
616     if (Value >= 0x100)
617       Value = (Value >> 8) | 0xa00;
618     else
619       Value |= 0x800;
620     return Value;
621   }
622 
623   /// Checks if Value is a correct immediate for instructions like VBIC/VORR.
624   inline bool isNEONi32splat(unsigned Value) {
625     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
626     return Value == 0 || isNEONBytesplat(Value, 4);
627   }
628 
629   /// Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
630   inline unsigned encodeNEONi32splat(unsigned Value) {
631     assert(isNEONi32splat(Value) && "Invalid NEON splat value");
632     if (Value >= 0x100 && Value <= 0xff00)
633       Value = (Value >> 8) | 0x200;
634     else if (Value > 0xffff && Value <= 0xff0000)
635       Value = (Value >> 16) | 0x400;
636     else if (Value > 0xffffff)
637       Value = (Value >> 24) | 0x600;
638     return Value;
639   }
640 
641   //===--------------------------------------------------------------------===//
642   // Floating-point Immediates
643   //
  /// getFPImmFloat - Expand an 8-bit encoded floating-point immediate (sign
  /// in bit 7, 3-bit exponent in bits 4-6, 4-bit mantissa in bits 0-3) into
  /// the equivalent IEEE single-precision value.
  inline float getFPImmFloat(unsigned Imm) {
    // We expect an 8-bit binary encoding of a floating-point number here.

    uint8_t Sign = (Imm >> 7) & 0x1;
    uint8_t Exp = (Imm >> 4) & 0x7;
    uint8_t Mantissa = Imm & 0xf;

    //   8-bit FP    IEEE Float Encoding
    //   abcd efgh   aBbbbbbc defgh000 00000000 00000000
    //
    // where B = NOT(b);
    uint32_t I = 0;
    I |= Sign << 31;
    I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
    I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
    I |= (Exp & 0x3) << 23;
    I |= Mantissa << 19;
    // Reinterpret the assembled bit pattern as a float (no value conversion).
    return bit_cast<float>(I);
  }
663 
  /// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
  /// floating-point value. If the value cannot be represented as an 8-bit
  /// floating-point value, then return -1.
  /// \param Imm the raw IEEE half-precision bit pattern to encode.
  inline int getFP16Imm(const APInt &Imm) {
    uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
    int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15;  // -14 to 15
    int64_t Mantissa = Imm.getZExtValue() & 0x3ff;  // 10 bits

    // We can handle 4 bits of mantissa.
    // mantissa = (16+UInt(e:f:g:h))/16.
    if (Mantissa & 0x3f)
      return -1;
    Mantissa >>= 6;

    // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
    if (Exp < -3 || Exp > 4)
      return -1;
    Exp = ((Exp+3) & 0x7) ^ 4;

    return ((int)Sign << 7) | (Exp << 4) | Mantissa;
  }
685 
686   inline int getFP16Imm(const APFloat &FPImm) {
687     return getFP16Imm(FPImm.bitcastToAPInt());
688   }
689 
690   /// If this is a FP16Imm encoded as a fp32 value, return the 8-bit encoding
691   /// for it. Otherwise return -1 like getFP16Imm.
692   inline int getFP32FP16Imm(const APInt &Imm) {
693     if (Imm.getActiveBits() > 16)
694       return -1;
695     return ARM_AM::getFP16Imm(Imm.trunc(16));
696   }
697 
698   inline int getFP32FP16Imm(const APFloat &FPImm) {
699     return getFP32FP16Imm(FPImm.bitcastToAPInt());
700   }
701 
  /// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
  /// floating-point value. If the value cannot be represented as an 8-bit
  /// floating-point value, then return -1.
  /// \param Imm the raw IEEE single-precision bit pattern to encode.
  inline int getFP32Imm(const APInt &Imm) {
    uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
    int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
    int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

    // We can handle 4 bits of mantissa.
    // mantissa = (16+UInt(e:f:g:h))/16.
    if (Mantissa & 0x7ffff)
      return -1;
    Mantissa >>= 19;
    if ((Mantissa & 0xf) != Mantissa)
      return -1;

    // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
    if (Exp < -3 || Exp > 4)
      return -1;
    Exp = ((Exp+3) & 0x7) ^ 4;

    return ((int)Sign << 7) | (Exp << 4) | Mantissa;
  }
725 
726   inline int getFP32Imm(const APFloat &FPImm) {
727     return getFP32Imm(FPImm.bitcastToAPInt());
728   }
729 
  /// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
  /// floating-point value. If the value cannot be represented as an 8-bit
  /// floating-point value, then return -1.
  /// \param Imm the raw IEEE double-precision bit pattern to encode.
  inline int getFP64Imm(const APInt &Imm) {
    uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
    int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
    uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;

    // We can handle 4 bits of mantissa.
    // mantissa = (16+UInt(e:f:g:h))/16.
    if (Mantissa & 0xffffffffffffULL)
      return -1;
    Mantissa >>= 48;
    if ((Mantissa & 0xf) != Mantissa)
      return -1;

    // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
    if (Exp < -3 || Exp > 4)
      return -1;
    Exp = ((Exp+3) & 0x7) ^ 4;

    return ((int)Sign << 7) | (Exp << 4) | Mantissa;
  }
753 
754   inline int getFP64Imm(const APFloat &FPImm) {
755     return getFP64Imm(FPImm.bitcastToAPInt());
756   }
757 
758 } // end namespace ARM_AM
759 } // end namespace llvm
760 
761 #endif
762 
763