//===- AArch64ExpandImm.cpp - AArch64 Immediate Expansion ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the immediate-expansion helpers used to synthesize
// 32- and 64-bit constants with MOVZ/MOVN/MOVK and ORR instructions.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"

using namespace llvm;
using namespace llvm::AArch64_IMM;

/// Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
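///
/// For example, getChunk(0x1122334455667788, 1) returns 0x5566.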
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");

  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}

/// Check whether the given 16-bit chunk replicated to full 64-bit width
/// can be materialized with an ORR instruction.
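///
/// For example, 0x5555 replicates to 0x5555555555555555, which is a valid
/// logical immediate, whereas 0x1234 replicated (0x1234123412341234) is not.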
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;

  return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
}

/// Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
/// an ORR instruction.
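///
/// For example, 0x5555123455556789 (|A|B|A|C| with A = 0x5555) becomes
/// ORR #0x5555555555555555, MOVK #0x6789 (LSL #0) and MOVK #0x1234 (LSL #32).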
static bool tryToreplicateChunks(uint64_t UImm,
                                 SmallVectorImpl<ImmInsnModel> &Insn) {
  using CountMap = DenseMap<uint64_t, unsigned>;

  CountMap Counts;

  // Scan the constant and count how often every chunk occurs.
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[getChunk(UImm, Idx)];

  // Traverse the chunks to find one which occurs more than once.
  for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
       Chunk != End; ++Chunk) {
    const uint64_t ChunkVal = Chunk->first;
    const unsigned Count = Chunk->second;

    uint64_t Encoding = 0;

    // We are looking for chunks which have two or three instances and can be
    // materialized with an ORR instruction.
    if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
      continue;

    const bool CountThree = Count == 3;

    Insn.push_back({ AArch64::ORRXri, 0, Encoding });

    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    // Find the first chunk not materialized with the ORR instruction.
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }

    // Create the first MOVK instruction.
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });

    // In case we have three instances the whole constant is now materialized
    // and we can exit.
    if (CountThree)
      return true;

    // Find the remaining chunk which needs to be materialized.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });
    return true;
  }

  return false;
}

/// Check whether this chunk matches the pattern '1...0...'. This pattern
/// starts a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
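///
/// For example, 0xFF00 (sign-extended by the caller to 0xFFFFFFFFFFFFFF00)
/// is a start chunk.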
static bool isStartChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(~Chunk);
}

/// Check whether this chunk matches the pattern '0...1...'. This pattern
/// ends a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
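///
/// For example, 0x00FF is an end chunk: its low eight bits form a contiguous
/// mask.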
static bool isEndChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(Chunk);
}

/// Clear or set all bits in the chunk at the given index.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFF;

  if (Clear)
    // Clear chunk in the immediate.
    Imm &= ~(Mask << (Idx * 16));
  else
    // Set all bits in the immediate for the particular chunk.
    Imm |= Mask << (Idx * 16);

  return Imm;
}

/// Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction. Materialize the
/// chunks which either interrupt the sequence or lie outside of it with
/// MOVK instructions.
///
/// Assume S is a chunk which starts the sequence (1...0...) and E is a chunk
/// which ends the sequence (0...1...). Then we are looking for constants
/// which contain at least one S and one E chunk.
/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
///
/// We are also looking for constants like |S|A|B|E| where the contiguous
/// sequence of ones wraps around the MSB into the LSB.
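///
/// For example, 0x00FF1234ABCDFF00 is materialized as
/// ORR #0x00FFFFFFFFFFFF00, MOVK #0xABCD (LSL #16) and MOVK #0x1234
/// (LSL #32).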
static bool trySequenceOfOnes(uint64_t UImm,
                              SmallVectorImpl<ImmInsnModel> &Insn) {
  const int NotSet = -1;
  const uint64_t Mask = 0xFFFF;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Try to find the chunks which start/end a contiguous sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    int64_t Chunk = getChunk(UImm, Idx);
    // Sign extend the 16-bit chunk to 64-bit.
    Chunk = (Chunk << 48) >> 48;

    if (isStartChunk(Chunk))
      StartIdx = Idx;
    else if (isEndChunk(Chunk))
      EndIdx = Idx;
  }

  // Early exit in case we can't find a start/end chunk.
  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;

  // Outside of the contiguous sequence of ones everything needs to be zero.
  uint64_t Outside = 0;
  // Chunks between the start and end chunk need to have all their bits set.
  uint64_t Inside = Mask;

  // If our contiguous sequence of ones wraps around from the MSB into the LSB,
  // just swap indices and pretend we are materializing a contiguous sequence
  // of zeros surrounded by a contiguous sequence of ones.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  uint64_t OrrImm = UImm;
  int FirstMovkIdx = NotSet;
  int SecondMovkIdx = NotSet;

  // Find out which chunks we need to patch up to obtain a contiguous sequence
  // of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    const uint64_t Chunk = getChunk(UImm, Idx);

    // Check whether we are looking at a chunk which is not part of the
    // contiguous sequence of ones.
    if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
      OrrImm = updateImm(OrrImm, Idx, Outside == 0);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;

      // Check whether we are looking at a chunk which is part of the
      // contiguous sequence of ones.
    } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
      OrrImm = updateImm(OrrImm, Idx, Inside != Mask);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;
    }
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  // Create the ORR-immediate instruction.
  uint64_t Encoding = 0;
  AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
  Insn.push_back({ AArch64::ORRXri, 0, Encoding });

  const bool SingleMovk = SecondMovkIdx == NotSet;
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, FirstMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             FirstMovkIdx * 16) });

  // Early exit in case we only need to emit a single MOVK instruction.
  if (SingleMovk)
    return true;

  // Create the second MOVK instruction.
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, SecondMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             SecondMovkIdx * 16) });

  return true;
}

/// Expand a MOVi32imm or MOVi64imm pseudo instruction to a
/// MOVZ or MOVN of width BitSize followed by up to 3 MOVK instructions.
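///
/// For example, 0x0000123400005678 expands to MOVZ #0x5678 followed by
/// MOVK #0x1234 (LSL #32), and 0xFFFFFFFFFFFF1234 expands to a single
/// MOVN #0xEDCB.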
static inline void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
                                      unsigned OneChunks, unsigned ZeroChunks,
                                      SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
  // more MOVK instructions to insert additional 16-bit portions into the
  // lower bits.
  bool isNeg = false;

  // Use MOVN to materialize the high bits if we have more all-one chunks
  // than all-zero chunks.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for last MOVK
  if (Imm != 0) {
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    Shift = (TZ / 16) * 16;
    LastShift = ((63 - LZ) / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;

  Insn.push_back({ FirstOpc, Imm16,
                   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });

  if (Shift == LastShift)
    return;

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.

    Insn.push_back({ Opc, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }
}

/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
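///
/// For example, 0x00FF00FF00FF00FF is a valid logical immediate and is
/// expanded to a single ORR, while 0x0000000000001234 needs only a MOVZ.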
void AArch64_IMM::expandMOVImm(uint64_t Imm, unsigned BitSize,
                               SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Scan the immediate and count the number of 16-bit chunks which are either
  // all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // Prefer MOVZ/MOVN over ORR because of the rules for the "mov" alias.
  if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  // Try a single ORR.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    Insn.push_back({ Opc, 0, Encoding });
    return;
  }

  // Sequences of up to three instructions.
  //
  // Prefer MOVZ/MOVN followed by MOVK; it's more readable, and possibly the
  // fastest sequence with fast literal generation.
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  assert(BitSize == 64 && "All 32-bit immediates can be expanded with a "
                          "MOVZ/MOVK pair");

  // Try other two-instruction sequences.

  // 64-bit ORR followed by MOVK.
  // We try to construct the ORR immediate in three different ways: either we
  // zero out the chunk which will be replaced, we fill the chunk which will
  // be replaced with ones, or we take the bit pattern from the other half of
  // the 64-bit immediate. This is comprehensive because of the way ORR
  // immediates are constructed.
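  //
  // For example, for 0x5555555555551234 the replicated variant replaces the
  // low chunk with the matching chunk from the upper half, yielding the
  // logical immediate 0x5555555555555555, which is then patched with
  // MOVK #0x1234.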
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    if (AArch64_AM::processLogicalImmediate(ZeroChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(OneChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(ReplicateChunk, BitSize,
                                            Encoding)) {
      // Create the ORR-immediate instruction.
      Insn.push_back({ AArch64::ORRXri, 0, Encoding });

      // Create the MOVK instruction.
      const unsigned Imm16 = getChunk(UImm, Shift / 16);
      Insn.push_back({ AArch64::MOVKXi, Imm16,
                       AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
      return;
    }
  }

  // FIXME: Add more two-instruction sequences.

  // Three instruction sequences.
  //
  // Prefer MOVZ/MOVN followed by two MOVK; it's more readable, and possibly
  // the fastest sequence with fast literal generation. (If neither MOVK is
  // part of a fast literal generation pair, it could be slower than the
  // four-instruction sequence, but we won't worry about that for now.)
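  //
  // For example, 0x0000123456789ABC becomes MOVZ #0x9ABC, MOVK #0x5678
  // (LSL #16) and MOVK #0x1234 (LSL #32).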
  if (OneChunks || ZeroChunks) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  // Check for identical 16-bit chunks within the constant and if so
  // materialize them with a single ORR instruction. The remaining one or two
  // 16-bit chunks will be materialized with MOVK instructions.
  if (BitSize == 64 && tryToreplicateChunks(UImm, Insn))
    return;

  // Check whether the constant contains a sequence of contiguous ones, which
  // might be interrupted by one or two chunks. If so, materialize the sequence
  // of contiguous ones with an ORR instruction. Materialize the chunks which
  // either interrupt the sequence or lie outside of it with MOVK instructions.
  if (BitSize == 64 && trySequenceOfOnes(UImm, Insn))
    return;

  // We found no possible two or three instruction sequence; use the general
  // four-instruction sequence.
  expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
}