//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64LegalizerInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"
#include <initializer_list>

#define DEBUG_TYPE "aarch64-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
    : ST(&ST) {
  using namespace TargetOpcode;
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);
  const LLT s256 = LLT::scalar(256);
  const LLT s512 = LLT::scalar(512);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s8 = LLT::vector(8, 8);
  const LLT v4s8 = LLT::vector(4, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s16 = LLT::vector(4, 16);
  const LLT v2s16 = LLT::vector(2, 16);
  const LLT v2s32 = LLT::vector(2, 32);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);
  const LLT v2p0 = LLT::vector(2, p0);

  std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */
                                                        v16s8, v8s16, v4s32,
                                                        v2s64, v2p0,
                                                        /* End 128bit types */
                                                        /* Begin 64bit types */
                                                        v8s8, v4s16, v2s32};

  const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();

  // FIXME: support subtargets which have neon/fp-armv8 disabled.
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    computeTables();
    return;
  }

  // Some instructions only support s16 if the subtarget has full 16-bit FP
  // support.
  const bool HasFP16 = ST.hasFullFP16();
  const LLT &MinFPScalar = HasFP16 ? s16 : s32;

  getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
      .legalFor({p0, s1, s8, s16, s32, s64})
      .legalFor(PackedVectorAllTypeList)
      .clampScalar(0, s1, s64)
      .widenScalarToNextPow2(0, 8)
      .fewerElementsIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].isVector() &&
                   (Query.Types[0].getElementType() != s64 ||
                    Query.Types[0].getNumElements() != 2);
          },
          [=](const LegalityQuery &Query) {
            LLT EltTy = Query.Types[0].getElementType();
            if (EltTy == s64)
              return std::make_pair(0, LLT::vector(2, 64));
            return std::make_pair(0, EltTy);
          });
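  // Illustrative effect of the mutation above (example types, not an
  // exhaustive list): a non-legal <4 x s64> is narrowed into <2 x s64>
  // pieces, while other odd vectors, say <3 x s8>, are scalarized to their
  // element type.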

  getActionDefinitionsBuilder(G_PHI).legalFor({p0, s16, s32, s64})
      .legalFor(PackedVectorAllTypeList)
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({s32, s64, v4s32, v2s32, v2s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8})
      .scalarizeIf(
          [=](const LegalityQuery &Query) {
            return Query.Opcode == G_MUL && Query.Types[0] == v2s64;
          },
          0)
      .legalFor({v2s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .customIf([=](const LegalityQuery &Query) {
        const auto &SrcTy = Query.Types[0];
        const auto &AmtTy = Query.Types[1];
        return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
               AmtTy.getSizeInBits() == 32;
      })
      .legalFor({
          {s32, s32},
          {s32, s64},
          {s64, s64},
          {v8s8, v8s8},
          {v16s8, v16s8},
          {v4s16, v4s16},
          {v8s16, v8s16},
          {v2s32, v2s32},
          {v4s32, v4s32},
          {v2s64, v2s64},
      })
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0)
      .minScalarSameAs(1, 0);

  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s64}, {v2p0, v2s64}})
      .clampScalar(1, s64, s64);

  getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}});

  getActionDefinitionsBuilder({G_SDIV, G_UDIV})
      .legalFor({s32, s64})
      .libcallFor({s128})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .scalarize(0);

  getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM})
      .lowerFor({s1, s8, s16, s32, s64});

  getActionDefinitionsBuilder({G_SMULO, G_UMULO}).lowerFor({{s64, s1}});

  getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});

  getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });

  getActionDefinitionsBuilder(
      {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
      .legalFor({{s32, s1}, {s64, s1}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
      .legalFor({s32, s64, v2s64, v4s32, v2s32})
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64);

  getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT,
                               G_FMA, G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_FNEARBYINT, G_INTRINSIC_LRINT})
      // If we don't have full FP16 support, then scalarize the elements of
      // vectors containing fp16 types.
      .fewerElementsIf(
          [=, &ST](const LegalityQuery &Query) {
            const auto &Ty = Query.Types[0];
            return Ty.isVector() && Ty.getElementType() == s16 &&
                   !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
      // If we don't have full FP16 support, then widen s16 to s32 if we
      // encounter it.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0] == s16 && !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
      .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});

  getActionDefinitionsBuilder(
      {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP, G_FEXP2, G_FPOW})
      // We need a call for these, so we always need to scalarize.
      .scalarize(0)
      // Regardless of FP16 support, widen 16-bit elements to 32-bits.
      .minScalar(0, s32)
      .libcallFor({s32, s64, v2s32, v4s32, v2s64});

  getActionDefinitionsBuilder(G_INSERT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
          return false;
        return isPowerOf2_32(Ty1.getSizeInBits()) &&
               (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
      })
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .maxScalarIf(typeInSet(0, {s32}), 1, s16)
      .maxScalarIf(typeInSet(0, {s64}), 1, s32)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder(G_EXTRACT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty1 != s32 && Ty1 != s64 && Ty1 != s128)
          return false;
        if (Ty1 == p0)
          return true;
        return isPowerOf2_32(Ty0.getSizeInBits()) &&
               (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
      })
      .clampScalar(1, s32, s128)
      .widenScalarToNextPow2(1)
      .maxScalarIf(typeInSet(1, {s32}), 0, s16)
      .maxScalarIf(typeInSet(1, {s64}), 0, s32)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, 8, 8},
                                 {s32, p0, 16, 8},
                                 {s32, p0, 32, 8},
                                 {s64, p0, 8, 2},
                                 {s64, p0, 16, 2},
                                 {s64, p0, 32, 4},
                                 {s64, p0, 64, 8},
                                 {p0, p0, 64, 8},
                                 {v2s32, p0, 64, 8}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      //       how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower anything left over into G_*EXT and G_LOAD
      .lower();

  auto IsPtrVecPred = [=](const LegalityQuery &Query) {
    const LLT &ValTy = Query.Types[0];
    if (!ValTy.isVector())
      return false;
    const LLT EltTy = ValTy.getElementType();
    return EltTy.isPointer() && EltTy.getAddressSpace() == 0;
  };
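  // e.g. IsPtrVecPred matches <2 x p0>. Loads/stores of such types are marked
  // custom below and handled in legalizeLoadStore(), which bitcasts the value
  // to the equivalent integer vector (<2 x s64> here).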

  getActionDefinitionsBuilder(G_LOAD)
      .legalForTypesWithMemDesc({{s8, p0, 8, 8},
                                 {s16, p0, 16, 8},
                                 {s32, p0, 32, 8},
                                 {s64, p0, 64, 8},
                                 {p0, p0, 64, 8},
                                 {s128, p0, 128, 8},
                                 {v8s8, p0, 64, 8},
                                 {v16s8, p0, 128, 8},
                                 {v4s16, p0, 64, 8},
                                 {v8s16, p0, 128, 8},
                                 {v2s32, p0, 64, 8},
                                 {v4s32, p0, 128, 8},
                                 {v2s64, p0, 128, 8}})
      // These extends are also legal
      .legalForTypesWithMemDesc({{s32, p0, 8, 8}, {s32, p0, 16, 8}})
      .clampScalar(0, s8, s64)
      .lowerIfMemSizeNotPow2()
      // Lower any any-extending loads left into G_ANYEXT and G_LOAD
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .widenScalarToNextPow2(0)
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .customIf(IsPtrVecPred);

  getActionDefinitionsBuilder(G_STORE)
      .legalForTypesWithMemDesc({{s8, p0, 8, 8},
                                 {s16, p0, 8, 8}, // truncstorei8 from s16
                                 {s32, p0, 8, 8}, // truncstorei8 from s32
                                 {s64, p0, 8, 8}, // truncstorei8 from s64
                                 {s16, p0, 16, 8},
                                 {s32, p0, 16, 8}, // truncstorei16 from s32
                                 {s64, p0, 16, 8}, // truncstorei16 from s64
                                 {s32, p0, 8, 8},
                                 {s32, p0, 16, 8},
                                 {s32, p0, 32, 8},
                                 {s64, p0, 64, 8},
                                 {s64, p0, 32, 8}, // truncstorei32 from s64
                                 {p0, p0, 64, 8},
                                 {s128, p0, 128, 8},
                                 {v16s8, p0, 128, 8},
                                 {v8s8, p0, 64, 8},
                                 {v4s16, p0, 64, 8},
                                 {v8s16, p0, 128, 8},
                                 {v2s32, p0, 64, 8},
                                 {v4s32, p0, 128, 8},
                                 {v2s64, p0, 128, 8}})
      .clampScalar(0, s8, s64)
      .lowerIfMemSizeNotPow2()
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].isScalar() &&
               Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      // Maximum: sN * k = 128
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .customIf(IsPtrVecPred);

  // Constants
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s8, s16, s32, s64})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0);
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalIf([=](const LegalityQuery &Query) {
        const auto &Ty = Query.Types[0];
        if (HasFP16 && Ty == s16)
          return true;
        return Ty == s32 || Ty == s64 || Ty == s128;
      })
      .clampScalar(0, MinFPScalar, s128);

  getActionDefinitionsBuilder({G_ICMP, G_FCMP})
      .legalFor({{s32, s32},
                 {s32, s64},
                 {s32, p0},
                 {v4s32, v4s32},
                 {v2s32, v2s32},
                 {v2s64, v2s64},
                 {v2s64, v2p0},
                 {v4s16, v4s16},
                 {v8s16, v8s16},
                 {v8s8, v8s8},
                 {v16s8, v16s8}})
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s32)
      .minScalarEltSameAsIf(
          [=](const LegalityQuery &Query) {
            const LLT &Ty = Query.Types[0];
            const LLT &SrcTy = Query.Types[1];
            return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
                   Ty.getElementType() != SrcTy.getElementType();
          },
          0, 1)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; },
          1, s32)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0,
          s64)
      .widenScalarOrEltToNextPow2(1)
      .clampNumElements(0, v2s32, v4s32);

  // Extensions
  auto ExtLegalFunc = [=](const LegalityQuery &Query) {
    unsigned DstSize = Query.Types[0].getSizeInBits();

    if (DstSize == 128 && !Query.Types[0].isVector())
      return false; // Extending to a scalar s128 needs narrowing.

    // Make sure that we have something that will fit in a register, and
    // make sure it's a power of 2.
    if (DstSize < 8 || DstSize > 128 || !isPowerOf2_32(DstSize))
      return false;

    const LLT &SrcTy = Query.Types[1];

    // Special case for s1.
    if (SrcTy == s1)
      return true;

    // Make sure we fit in a register otherwise. Don't bother checking that
    // the source type is below 128 bits. We shouldn't be allowing anything
    // through which is wider than the destination in the first place.
    unsigned SrcSize = SrcTy.getSizeInBits();
    if (SrcSize < 8 || !isPowerOf2_32(SrcSize))
      return false;

    return true;
  };
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf(ExtLegalFunc)
      .clampScalar(0, s64, s64); // Just for s128, others are handled above.

  getActionDefinitionsBuilder(G_TRUNC)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
          0, s8)
      .customIf([=](const LegalityQuery &Query) {
        LLT DstTy = Query.Types[0];
        LLT SrcTy = Query.Types[1];
        return DstTy == v8s8 && SrcTy.getSizeInBits() > 128;
      })
      .alwaysLegal();

  getActionDefinitionsBuilder(G_SEXT_INREG).legalFor({s32, s64}).lower();

  // FP conversions
  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(
          {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
      .clampMaxNumElements(0, s32, 2);
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(
          {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
      .clampMaxNumElements(0, s64, 2);

  // Conversions
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .clampScalar(1, s32, s64)
      .minScalarSameAs(1, 0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  // Control-flow
  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0)
      .lowerIf(isVector(0));

  // Pointer-handling
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  if (TM.getCodeModel() == CodeModel::Small)
    getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom();
  else
    getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .unsupportedIf([&](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
      })
      .legalFor({{p0, s64}});

  // Casts for 32 and 64-bit width type are just copies.
  // Same for 128-bit width type, except they are on the FPR bank.
  getActionDefinitionsBuilder(G_BITCAST)
      // FIXME: This is wrong since G_BITCAST is not allowed to change the
      // number of bits but it's what the previous code described and fixing
      // it breaks tests.
      .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
                                 v8s16, v4s16, v2s16, v4s32, v2s32, v2s64,
                                 v2p0});

  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .lowerIf(all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(1, s1),
                   typeIs(2, p0)));

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
      .legalIf(all(typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0)))
      .customIf([](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() == 128;
      });

  getActionDefinitionsBuilder(
      {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
       G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
       G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
      .legalIf(all(typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0)));

  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    // FIXME: This rule is horrible, but specifies the same as what we had
    // before with the particularly strange definitions removed (e.g.
    // s8 = G_MERGE_VALUES s32, s32).
    // Part of the complexity comes from these ops being extremely flexible. For
    // example, you can build/decompose vectors with it, concatenate vectors,
    // etc. and in addition to this you can also bitcast with it at the same
    // time. We've been considering breaking it up into multiple ops to make it
    // more manageable throughout the backend.
    getActionDefinitionsBuilder(Op)
        // Break up vectors with weird elements into scalars
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
            scalarize(0))
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
            scalarize(1))
        // Clamp the big scalar to s8-s512 and make it either a power of 2, 192,
        // or 384.
        .clampScalar(BigTyIdx, s8, s512)
        .widenScalarIf(
            [=](const LegalityQuery &Query) {
              const LLT &Ty = Query.Types[BigTyIdx];
              return !isPowerOf2_32(Ty.getSizeInBits()) &&
                     Ty.getSizeInBits() % 64 != 0;
            },
            [=](const LegalityQuery &Query) {
              // Pick the next power of 2, or a multiple of 64 over 128.
              // Whichever is smaller.
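              // e.g. for s130 the next power of 2 is s256, while the next
              // multiple of 64 is s192, so s192 is chosen.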
              const LLT &Ty = Query.Types[BigTyIdx];
              unsigned NewSizeInBits =
                  1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
              if (NewSizeInBits >= 256) {
                unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
                if (RoundedTo < NewSizeInBits)
                  NewSizeInBits = RoundedTo;
              }
              return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
            })
        // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
        // worth considering the multiples of 64 since 2*192 and 2*384 are not
        // valid.
        .clampScalar(LitTyIdx, s8, s256)
        .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
        // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384,
        // s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
        // At this point it's simple enough to accept the legal types.
        .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];
          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;
          return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
        })
        // Any vectors left are the wrong size. Scalarize them.
        .scalarize(0)
        .scalarize(1);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      })
      .minScalar(2, s64)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[1];
        return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 ||
               VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 ||
               VecTy == v16s8 || VecTy == v2p0;
      })
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            // We want to promote <M x s1> to <M x s64> if that wouldn't
            // cause the total vec size to be > 128b.
            return Query.Types[1].getNumElements() <= 2;
          },
          0, s64)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 4;
          },
          0, s32)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 8;
          },
          0, s16)
      .minScalarOrEltIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[1].getNumElements() <= 16;
          },
          0, s8)
      .minScalarOrElt(0, s8); // Worst case, we need at least s8.

  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
      .legalIf(typeInSet(0, {v8s16, v2s32, v4s32, v2s64}));

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalFor({{v8s8, s8},
                 {v16s8, s8},
                 {v4s16, s16},
                 {v8s16, s16},
                 {v2s32, s32},
                 {v4s32, s32},
                 {v2p0, p0},
                 {v2s64, s64}})
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .minScalarSameAs(1, 0);

  getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower();

  getActionDefinitionsBuilder(G_CTLZ)
      .legalForCartesianProduct(
          {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
      .scalarize(1);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower();

  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &DstTy = Query.Types[0];
        const LLT &SrcTy = Query.Types[1];
        // For now just support the TBL2 variant which needs the source vectors
        // to be the same size as the dest.
        if (DstTy != SrcTy)
          return false;
        for (auto &Ty : {v2s32, v4s32, v2s64, v2p0, v16s8, v8s16}) {
          if (DstTy == Ty)
            return true;
        }
        return false;
      })
      // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors); we
      // just want those lowered into G_BUILD_VECTOR.
      .lowerIf([=](const LegalityQuery &Query) {
        return !Query.Types[1].isVector();
      })
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64);

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
      .legalFor({{v4s32, v2s32}, {v8s16, v4s16}});

  getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}});

  getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) {
    return Query.Types[0] == p0 && Query.Types[1] == s64;
  });

  getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();

  getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET})
      .libcall();

  // FIXME: Legal types are only legal with NEON.
  getActionDefinitionsBuilder(G_ABS)
      .lowerIf(isScalar(0))
      .legalFor(PackedVectorAllTypeList);

  getActionDefinitionsBuilder(G_VECREDUCE_FADD)
      // We only have FADDP to do reduction-like operations. Lower the rest.
      .legalFor({{s32, v2s32}, {s64, v2s64}})
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 2)
      .lower();

  getActionDefinitionsBuilder(G_VECREDUCE_ADD)
      .legalFor(
          {{s8, v16s8}, {s16, v8s16}, {s32, v4s32}, {s32, v2s32}, {s64, v2s64}})
      .clampMaxNumElements(1, s64, 2)
      .clampMaxNumElements(1, s32, 4)
      .lower();

  getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
      .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });

  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  getActionDefinitionsBuilder(G_ROTR)
      .legalFor({{s32, s64}, {s64, s64}})
      .customIf([=](const LegalityQuery &Q) {
        return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
      })
      .lower();
  getActionDefinitionsBuilder(G_ROTL).lower();

  getActionDefinitionsBuilder({G_SBFX, G_UBFX})
      .customFor({{s32, s32}, {s64, s64}});

  // TODO: Custom legalization for s128
  // TODO: v2s64, v2s32, v4s32, v4s16, v8s16
  // TODO: Use generic lowering when custom lowering is not possible.
  auto always = [=](const LegalityQuery &Q) { return true; };
  getActionDefinitionsBuilder(G_CTPOP)
      .legalFor({{v8s8, v8s8}, {v16s8, v16s8}})
      .clampScalar(0, s32, s128)
      .widenScalarToNextPow2(0)
      .minScalarEltSameAsIf(always, 1, 0)
      .maxScalarEltSameAsIf(always, 1, 0)
      .customFor({{s32, s32}, {s64, s64}});

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  GISelChangeObserver &Observer = Helper.Observer;
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_GLOBAL_VALUE:
    return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_TRUNC:
    return legalizeVectorTrunc(MI, Helper);
  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX:
    return legalizeBitfieldExtract(MI, MRI, Helper);
  case TargetOpcode::G_ROTR:
    return legalizeRotate(MI, MRI, Helper);
  case TargetOpcode::G_CTPOP:
    return legalizeCTPOP(MI, MRI, Helper);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return legalizeAtomicCmpxchg128(MI, MRI, Helper);
  }

  llvm_unreachable("expected switch to return");
}

bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          LegalizerHelper &Helper) const {
  // To allow imported patterns to match, we ensure that the rotate amount
  // is 64b with an extension.
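  // A sketch of the rewrite (virtual register names are illustrative):
  //   %r:s32 = G_ROTR %x:s32, %amt:s32
  // becomes
  //   %amt64:s64 = G_SEXT %amt:s32
  //   %r:s32 = G_ROTR %x:s32, %amt64:s64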
  Register AmtReg = MI.getOperand(2).getReg();
  LLT AmtTy = MRI.getType(AmtReg);
  (void)AmtTy;
  assert(AmtTy.isScalar() && "Expected a scalar rotate");
  assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
  auto NewAmt = Helper.MIRBuilder.buildSExt(LLT::scalar(64), AmtReg);
  Helper.Observer.changingInstr(MI);
  MI.getOperand(2).setReg(NewAmt.getReg(0));
  Helper.Observer.changedInstr(MI);
  return true;
}

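// Helper: split \p Reg into \p NumParts pieces of type \p Ty by creating the
// result registers and emitting a single G_UNMERGE_VALUES.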
static void extractParts(Register Reg, MachineRegisterInfo &MRI,
                         MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts,
                         SmallVectorImpl<Register> &VRegs) {
  for (int I = 0; I < NumParts; ++I)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}

bool AArch64LegalizerInfo::legalizeVectorTrunc(
    MachineInstr &MI, LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  // Similar to how operand splitting is done in SelectionDAG, we can handle
  // %res(v8s8) = G_TRUNC %in(v8s32) by generating:
  //   %inlo(<4 x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>)
  //   %lo16(<4 x s16>) = G_TRUNC %inlo
  //   %hi16(<4 x s16>) = G_TRUNC %inhi
  //   %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16
  //   %res(<8 x s8>) = G_TRUNC %in16

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);
  assert(isPowerOf2_32(DstTy.getSizeInBits()) &&
         isPowerOf2_32(SrcTy.getSizeInBits()));

  // Split input type.
  LLT SplitSrcTy = SrcTy.changeNumElements(SrcTy.getNumElements() / 2);
  // First, split the source into two smaller vectors.
  SmallVector<Register, 2> SplitSrcs;
  extractParts(SrcReg, MRI, MIRBuilder, SplitSrcTy, 2, SplitSrcs);

  // Truncate the splits into intermediate narrower elements.
  LLT InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
  for (unsigned I = 0; I < SplitSrcs.size(); ++I)
    SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);

  auto Concat = MIRBuilder.buildConcatVectors(
      DstTy.changeElementSize(DstTy.getScalarSizeInBits() * 2), SplitSrcs);

  Helper.Observer.changingInstr(MI);
  MI.getOperand(1).setReg(Concat.getReg(0));
  Helper.Observer.changedInstr(MI);
  return true;
}

bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
  // G_ADD_LOW instructions.
  // By splitting this here, we can optimize accesses in the small code model by
  // folding the G_ADD_LOW into the load/store offset.
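  // A sketch of the split (register names are illustrative):
  //   %g:p0 = G_GLOBAL_VALUE @foo
  // becomes
  //   %adrp:p0 = ADRP @foo (page)
  //   %g:p0 = G_ADD_LOW %adrp, @foo (pageoff)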
  auto &GlobalOp = MI.getOperand(1);
  const auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    return true; // Don't want to modify TLS vars.

  auto &TM = ST->getTargetLowering()->getTargetMachine();
  unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);

  if (OpFlags & AArch64II::MO_GOT)
    return true;

  auto Offset = GlobalOp.getOffset();
  Register DstReg = MI.getOperand(0).getReg();
  auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {})
                  .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE);
  // Set the regclass on the dest reg too.
  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);

  // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
  // by creating a MOVK that sets bits 48-63 of the register to (global address
  // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
  // prevent an incorrect tag being generated during relocation when the
  // global appears before the code section. Without the offset, a global at
  // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
  // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
  // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe`
  // instead of `0xf`.
  // This assumes that we're in the small code model so we can assume a binary
  // size of <= 4GB, which makes the untagged PC relative offset positive. The
  // binary must also be loaded into address range [0, 2^48). Both of these
  // properties need to be ensured at runtime when using tagged addresses.
  if (OpFlags & AArch64II::MO_TAGGED) {
    assert(!Offset &&
           "Should not have folded in an offset for a tagged global!");
    ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP})
               .addGlobalAddress(GV, 0x100000000,
                                 AArch64II::MO_PREL | AArch64II::MO_G3)
               .addImm(48);
    MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
  }

  MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
      .addGlobalAddress(GV, Offset,
                        OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                             MachineInstr &MI) const {
  return true;
}

bool AArch64LegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
  // imported patterns can select it later. Either way, it will be legal.
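  // e.g. (illustrative): given %amt:s32 = G_CONSTANT i32 3,
  //   %d:s32 = G_SHL %x:s32, %amt:s32
  // has its amount operand replaced so that
  //   %c:s64 = G_CONSTANT i64 3
  //   %d:s32 = G_SHL %x:s32, %c:s64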
  Register AmtReg = MI.getOperand(2).getReg();
  auto VRegAndVal = getConstantVRegValWithLookThrough(AmtReg, MRI);
  if (!VRegAndVal)
    return true;
  // Check the shift amount is in range for an immediate form.
  int64_t Amount = VRegAndVal->Value.getSExtValue();
  if (Amount > 31)
    return true; // This will have to remain a register variant.
  auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(ExtCst.getReg(0));
  Observer.changedInstr(MI);
  return true;
}

bool AArch64LegalizerInfo::legalizeLoadStore(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_STORE ||
         MI.getOpcode() == TargetOpcode::G_LOAD);
  // Here we just try to handle vector loads/stores where our value type might
  // have pointer elements, which the SelectionDAG importer can't handle. To
  // allow the existing patterns for s64 to fire for p0, we just try to bitcast
  // the value to use s64 types.
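  // A sketch for a <2 x p0> store (register names are illustrative):
  //   G_STORE %val(<2 x p0>), %ptr(p0)
  // becomes
  //   %cast(<2 x s64>) = G_BITCAST %val(<2 x p0>)
  //   G_STORE %cast(<2 x s64>), %ptr(p0)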

  // Custom legalization requires that the instruction, if not deleted, be
  // fully legalized. To allow further legalization of the instruction, we
  // create a new instruction and erase the existing one.

  Register ValReg = MI.getOperand(0).getReg();
  const LLT ValTy = MRI.getType(ValReg);

  if (!ValTy.isVector() || !ValTy.getElementType().isPointer() ||
      ValTy.getElementType().getAddressSpace() != 0) {
    LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
    return false;
  }

  unsigned PtrSize = ValTy.getElementType().getSizeInBits();
  const LLT NewTy = LLT::vector(ValTy.getNumElements(), PtrSize);
  auto &MMO = **MI.memoperands_begin();
  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
    MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO);
  } else {
    auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
    MIRBuilder.buildBitcast(ValReg, NewLoad);
  }
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
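  // This expands G_VAARG assuming a va_list that is a plain pointer into the
  // argument save area (as on e.g. Darwin or Windows): load the current list
  // pointer, realign it if the type needs more than pointer alignment, load
  // the value from it, then store the bumped pointer back to the list.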
  MachineFunction &MF = MIRBuilder.getMF();
  Align Alignment(MI.getOperand(2).getImm());
  Register Dst = MI.getOperand(0).getReg();
  Register ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  const Align PtrAlign = Align(PtrSize);
  auto List = MIRBuilder.buildLoad(
      PtrTy, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, PtrAlign));

  MachineInstrBuilder DstPtr;
  if (Alignment > PtrAlign) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 =
        MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
    auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
    DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment));
  } else
    DstPtr = List;

  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValSize, std::max(Alignment, PtrAlign)));

  auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign));

  auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));

  MIRBuilder.buildStore(NewList, ListPtr,
                        *MF.getMachineMemOperand(MachinePointerInfo(),
                                                 MachineMemOperand::MOStore,
                                                 PtrSize, PtrAlign));

  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeBitfieldExtract(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  // Only legal if we can select immediate forms.
  // TODO: Lower this otherwise.
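  // e.g. (illustrative) %d:s32 = G_UBFX %x:s32, %lsb:s32, %width:s32 is kept
  // legal only when %lsb and %width are constants, matching the UBFM/SBFM
  // immediate forms that selection can handle.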
  return getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
         getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
}

bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         LegalizerHelper &Helper) const {
  // There is no integer popcount instruction, but G_CTPOP can be lowered
  // more efficiently to the following sequence that uses AdvSIMD
  // registers/instructions, as long as the copies to/from the AdvSIMD
  // registers are cheap.
  //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
  //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
  //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
  //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
  if (!ST->hasNEON() ||
      MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat))
    return false;
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register Dst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Val);

  // TODO: Handle vector types.
  assert(!Ty.isVector() && "Vector types not handled yet!");
  assert(Ty == MRI.getType(Dst) &&
         "Expected src and dst to have the same type!");
  // TODO: Handle s128.
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected only 32 or 64 bit scalars!");
  if (Size == 32)
    Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
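  // Bitcast the (possibly zero-extended) 64-bit scalar to <8 x s8>, count
  // bits per byte with G_CTPOP, then sum the byte counts with the NEON
  // uaddlv intrinsic, which produces an s32.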
  const LLT V8S8 = LLT::vector(8, LLT::scalar(8));
  Val = MIRBuilder.buildBitcast(V8S8, Val).getReg(0);
  auto CTPOP = MIRBuilder.buildCTPOP(V8S8, Val);
  auto UADDLV =
      MIRBuilder
          .buildIntrinsic(Intrinsic::aarch64_neon_uaddlv, {LLT::scalar(32)},
                          /*HasSideEffects = */ false)
          .addUse(CTPOP.getReg(0));
  if (Size == 64)
    MIRBuilder.buildZExt(Dst, UADDLV);
  else
    UADDLV->getOperand(0).setReg(Dst);
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  LLT s64 = LLT::scalar(64);
  auto Addr = MI.getOperand(1).getReg();
  auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
  auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
  auto DstLo = MRI.createGenericVirtualRegister(s64);
  auto DstHi = MRI.createGenericVirtualRegister(s64);

  MachineInstrBuilder CAS;
  if (ST->hasLSE()) {
    // We have 128-bit CASP instructions taking XSeqPair registers, which are
    // s128. We need the merge/unmerge to bracket the expansion and pair up with
    // the rest of the MIR so we must reassemble the extracted registers into a
    // 128-bit known-regclass one with code like this:
    //
    //     %in1 = REG_SEQUENCE Lo, Hi    ; One for each input
    //     %out = CASP %in1, ...
    //     %OldLo = G_EXTRACT %out, 0
    //     %OldHi = G_EXTRACT %out, 64
    auto Ordering = (*MI.memoperands_begin())->getOrdering();
    unsigned Opcode;
    switch (Ordering) {
    case AtomicOrdering::Acquire:
      Opcode = AArch64::CASPAX;
      break;
    case AtomicOrdering::Release:
      Opcode = AArch64::CASPLX;
      break;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      Opcode = AArch64::CASPALX;
      break;
    default:
      Opcode = AArch64::CASPX;
      break;
    }

    LLT s128 = LLT::scalar(128);
    auto CASDst = MRI.createGenericVirtualRegister(s128);
    auto CASDesired = MRI.createGenericVirtualRegister(s128);
    auto CASNew = MRI.createGenericVirtualRegister(s128);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
        .addUse(DesiredI->getOperand(0).getReg())
        .addImm(AArch64::sube64)
        .addUse(DesiredI->getOperand(1).getReg())
        .addImm(AArch64::subo64);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
        .addUse(NewI->getOperand(0).getReg())
        .addImm(AArch64::sube64)
        .addUse(NewI->getOperand(1).getReg())
        .addImm(AArch64::subo64);

    CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});

    MIRBuilder.buildExtract({DstLo}, {CASDst}, 0);
    MIRBuilder.buildExtract({DstHi}, {CASDst}, 64);
  } else {
    // The -O0 CMP_SWAP_128 is friendlier to generate code for because LDXP/STXP
    // can take arbitrary registers so it just has the normal GPR64 operands the
    // rest of AArch64 is expecting.
    auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    CAS = MIRBuilder.buildInstr(AArch64::CMP_SWAP_128, {DstLo, DstHi, Scratch},
                                {Addr, DesiredI->getOperand(0),
                                 DesiredI->getOperand(1), NewI->getOperand(0),
                                 NewI->getOperand(1)});
  }

  CAS.cloneMemRefs(MI);
  constrainSelectedInstRegOperands(*CAS, *ST->getInstrInfo(),
                                   *MRI.getTargetRegisterInfo(),
                                   *ST->getRegBankInfo());

  MIRBuilder.buildMerge(MI.getOperand(0), {DstLo, DstHi});
  MI.eraseFromParent();
  return true;
}