1 //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the MachineLegalizer class for
10 /// AArch64.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64LegalizerInfo.h"
15 #include "AArch64RegisterBankInfo.h"
16 #include "AArch64Subtarget.h"
17 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
18 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
19 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
20 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21 #include "llvm/CodeGen/GlobalISel/Utils.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetOpcodes.h"
25 #include "llvm/CodeGen/ValueTypes.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Intrinsics.h"
28 #include "llvm/IR/IntrinsicsAArch64.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/Support/MathExtras.h"
31 #include <initializer_list>
32 
33 #define DEBUG_TYPE "aarch64-legalinfo"
34 
35 using namespace llvm;
36 using namespace LegalizeActions;
37 using namespace LegalizeMutations;
38 using namespace LegalityPredicates;
39 using namespace MIPatternMatch;
40 
41 AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
42     : ST(&ST) {
43   using namespace TargetOpcode;
44   const LLT p0 = LLT::pointer(0, 64);
45   const LLT s8 = LLT::scalar(8);
46   const LLT s16 = LLT::scalar(16);
47   const LLT s32 = LLT::scalar(32);
48   const LLT s64 = LLT::scalar(64);
49   const LLT s128 = LLT::scalar(128);
50   const LLT v16s8 = LLT::fixed_vector(16, 8);
51   const LLT v8s8 = LLT::fixed_vector(8, 8);
52   const LLT v4s8 = LLT::fixed_vector(4, 8);
53   const LLT v8s16 = LLT::fixed_vector(8, 16);
54   const LLT v4s16 = LLT::fixed_vector(4, 16);
55   const LLT v2s16 = LLT::fixed_vector(2, 16);
56   const LLT v2s32 = LLT::fixed_vector(2, 32);
57   const LLT v4s32 = LLT::fixed_vector(4, 32);
58   const LLT v2s64 = LLT::fixed_vector(2, 64);
59   const LLT v2p0 = LLT::fixed_vector(2, p0);
60 
61   std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */
62                                                         v16s8, v8s16, v4s32,
63                                                         v2s64, v2p0,
64                                                         /* End 128bit types */
65                                                         /* Begin 64bit types */
66                                                         v8s8, v4s16, v2s32};
67 
68   const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();
69 
70   // FIXME: support subtargets which have neon/fp-armv8 disabled.
71   if (!ST.hasNEON() || !ST.hasFPARMv8()) {
72     getLegacyLegalizerInfo().computeTables();
73     return;
74   }
75 
76   // Some instructions only support s16 if the subtarget has full 16-bit FP
77   // support.
78   const bool HasFP16 = ST.hasFullFP16();
79   const LLT &MinFPScalar = HasFP16 ? s16 : s32;
80 
81   const bool HasCSSC = ST.hasCSSC();
82   const bool HasRCPC3 = ST.hasRCPC3();
83 
84   getActionDefinitionsBuilder(
85       {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
86       .legalFor({p0, s8, s16, s32, s64})
87       .legalFor(PackedVectorAllTypeList)
88       .widenScalarToNextPow2(0)
89       .clampScalar(0, s8, s64)
90       .fewerElementsIf(
91           [=](const LegalityQuery &Query) {
92             return Query.Types[0].isVector() &&
93                    (Query.Types[0].getElementType() != s64 ||
94                     Query.Types[0].getNumElements() != 2);
95           },
96           [=](const LegalityQuery &Query) {
97             LLT EltTy = Query.Types[0].getElementType();
98             if (EltTy == s64)
99               return std::make_pair(0, LLT::fixed_vector(2, 64));
100             return std::make_pair(0, EltTy);
101           });
102 
103   getActionDefinitionsBuilder(G_PHI)
104       .legalFor({p0, s16, s32, s64})
105       .legalFor(PackedVectorAllTypeList)
106       .widenScalarToNextPow2(0)
107       .clampScalar(0, s16, s64)
108       // Maximum: sN * k = 128
109       .clampMaxNumElements(0, s8, 16)
110       .clampMaxNumElements(0, s16, 8)
111       .clampMaxNumElements(0, s32, 4)
112       .clampMaxNumElements(0, s64, 2)
113       .clampMaxNumElements(0, p0, 2);
114 
115   getActionDefinitionsBuilder(G_BSWAP)
116       .legalFor({s32, s64, v4s32, v2s32, v2s64})
117       .widenScalarToNextPow2(0)
118       .clampScalar(0, s32, s64);
119 
120   getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
121       .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8})
122       .scalarizeIf(
123           [=](const LegalityQuery &Query) {
124             return Query.Opcode == G_MUL && Query.Types[0] == v2s64;
125           },
126           0)
127       .legalFor({v2s64})
128       .widenScalarToNextPow2(0)
129       .clampScalar(0, s32, s64)
130       .clampMaxNumElements(0, s8, 16)
131       .clampMaxNumElements(0, s16, 8)
132       .clampNumElements(0, v2s32, v4s32)
133       .clampNumElements(0, v2s64, v2s64)
134       .minScalarOrEltIf(
135           [=](const LegalityQuery &Query) {
136             return Query.Types[0].getNumElements() <= 2;
137           },
138           0, s32)
139       .minScalarOrEltIf(
140           [=](const LegalityQuery &Query) {
141             return Query.Types[0].getNumElements() <= 4;
142           },
143           0, s16)
144       .minScalarOrEltIf(
145           [=](const LegalityQuery &Query) {
146             return Query.Types[0].getNumElements() <= 16;
147           },
148           0, s8)
149       .moreElementsToNextPow2(0);
150 
151   getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
152       .customIf([=](const LegalityQuery &Query) {
153         const auto &SrcTy = Query.Types[0];
154         const auto &AmtTy = Query.Types[1];
155         return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
156                AmtTy.getSizeInBits() == 32;
157       })
158       .legalFor({
159           {s32, s32},
160           {s32, s64},
161           {s64, s64},
162           {v8s8, v8s8},
163           {v16s8, v16s8},
164           {v4s16, v4s16},
165           {v8s16, v8s16},
166           {v2s32, v2s32},
167           {v4s32, v4s32},
168           {v2s64, v2s64},
169       })
170       .widenScalarToNextPow2(0)
171       .clampScalar(1, s32, s64)
172       .clampScalar(0, s32, s64)
173       .clampNumElements(0, v2s32, v4s32)
174       .clampNumElements(0, v2s64, v2s64)
175       .moreElementsToNextPow2(0)
176       .minScalarSameAs(1, 0);
177 
178   getActionDefinitionsBuilder(G_PTR_ADD)
179       .legalFor({{p0, s64}, {v2p0, v2s64}})
180       .clampScalar(1, s64, s64);
181 
182   getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}});
183 
184   getActionDefinitionsBuilder({G_SDIV, G_UDIV})
185       .legalFor({s32, s64})
186       .libcallFor({s128})
187       .clampScalar(0, s32, s64)
188       .widenScalarToNextPow2(0)
189       .scalarize(0);
190 
191   getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM})
192       .lowerFor({s8, s16, s32, s64, v2s64, v4s32, v2s32})
193       .widenScalarOrEltToNextPow2(0)
194       .clampScalarOrElt(0, s32, s64)
195       .clampNumElements(0, v2s32, v4s32)
196       .clampNumElements(0, v2s64, v2s64)
197       .moreElementsToNextPow2(0);
198 
199 
200   getActionDefinitionsBuilder({G_SMULO, G_UMULO})
201       .widenScalarToNextPow2(0, /*Min = */ 32)
202       .clampScalar(0, s32, s64)
203       .lower();
204 
205   getActionDefinitionsBuilder({G_SMULH, G_UMULH})
206       .legalFor({s64, v8s16, v16s8, v4s32})
207       .lower();
208 
209   auto &MinMaxActions = getActionDefinitionsBuilder(
210       {G_SMIN, G_SMAX, G_UMIN, G_UMAX});
211   if (HasCSSC)
212     MinMaxActions
213         .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
214         // Make clamping conditional on the CSSC extension: without legal
215         // types we lower to CMP, which can fold one of the two sxtb's we'd
216         // otherwise need when the type is smaller than 32 bits.
217         .minScalar(0, s32);
218   else
219     MinMaxActions
220         .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32});
221   MinMaxActions
222       .clampNumElements(0, v8s8, v16s8)
223       .clampNumElements(0, v4s16, v8s16)
224       .clampNumElements(0, v2s32, v4s32)
225       // FIXME: This shouldn't be needed as v2s64 types are going to
226       // be expanded anyway, but G_ICMP doesn't support splitting vectors yet
227       .clampNumElements(0, v2s64, v2s64)
228       .lower();
229 
230   getActionDefinitionsBuilder(
231       {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
232       .legalFor({{s32, s32}, {s64, s32}})
233       .clampScalar(0, s32, s64)
234       .clampScalar(1, s32, s64)
235       .widenScalarToNextPow2(0);
236 
237   getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
238       .legalFor({MinFPScalar, s32, s64, v2s64, v4s32, v2s32})
239       .clampScalar(0, MinFPScalar, s64)
240       .clampNumElements(0, v2s32, v4s32)
241       .clampNumElements(0, v2s64, v2s64);
242 
243   getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});
244 
245   getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT,
246                                G_FMA, G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
247                                G_FNEARBYINT, G_INTRINSIC_LRINT})
248       // If we don't have full FP16 support, then scalarize the elements of
249       // vectors containing fp16 types.
250       .fewerElementsIf(
251           [=, &ST](const LegalityQuery &Query) {
252             const auto &Ty = Query.Types[0];
253             return Ty.isVector() && Ty.getElementType() == s16 &&
254                    !ST.hasFullFP16();
255           },
256           [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
257       // If we don't have full FP16 support, then widen s16 to s32 if we
258       // encounter it.
259       .widenScalarIf(
260           [=, &ST](const LegalityQuery &Query) {
261             return Query.Types[0] == s16 && !ST.hasFullFP16();
262           },
263           [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
264       .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});
265 
266   getActionDefinitionsBuilder(
267       {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP, G_FEXP2, G_FPOW})
268       // We need a call for these, so we always need to scalarize.
269       .scalarize(0)
270       // Regardless of FP16 support, widen 16-bit elements to 32-bits.
271       .minScalar(0, s32)
272       .libcallFor({s32, s64, v2s32, v4s32, v2s64});
273 
274   getActionDefinitionsBuilder(G_INSERT)
275       .legalIf(all(typeInSet(0, {s32, s64, p0}),
276                    typeInSet(1, {s8, s16, s32}), smallerThan(1, 0)))
277       .widenScalarToNextPow2(0)
278       .clampScalar(0, s32, s64)
279       .widenScalarToNextPow2(1)
280       .minScalar(1, s8)
281       .maxScalarIf(typeInSet(0, {s32}), 1, s16)
282       .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32);
283 
284   getActionDefinitionsBuilder(G_EXTRACT)
285       .legalIf(all(typeInSet(0, {s16, s32, s64, p0}),
286                    typeInSet(1, {s32, s64, s128, p0}), smallerThan(0, 1)))
287       .widenScalarToNextPow2(1)
288       .clampScalar(1, s32, s128)
289       .widenScalarToNextPow2(0)
290       .minScalar(0, s16)
291       .maxScalarIf(typeInSet(1, {s32}), 0, s16)
292       .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32)
293       .maxScalarIf(typeInSet(1, {s128}), 0, s64);
294 
295 
296   for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
297     auto &Actions = getActionDefinitionsBuilder(Op);
298 
299     if (Op == G_SEXTLOAD)
300       Actions.lowerIf(atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Unordered));
301 
302     // Atomics have zero extending behavior.
303     Actions
304       .legalForTypesWithMemDesc({{s32, p0, s8, 8},
305                                  {s32, p0, s16, 8},
306                                  {s32, p0, s32, 8},
307                                  {s64, p0, s8, 2},
308                                  {s64, p0, s16, 2},
309                                  {s64, p0, s32, 4},
310                                  {s64, p0, s64, 8},
311                                  {p0, p0, s64, 8},
312                                  {v2s32, p0, s64, 8}})
313       .widenScalarToNextPow2(0)
314       .clampScalar(0, s32, s64)
315       // TODO: We could support sum-of-pow2's but the lowering code doesn't know
316       //       how to do that yet.
317       .unsupportedIfMemSizeNotPow2()
318       // Lower anything left over into G_*EXT and G_LOAD
319       .lower();
320   }
321 
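  // Match vectors of address-space-0 pointers; G_LOAD/G_STORE of these are
  // custom-legalized below by bitcasting the value to a vector of s64
  // elements (see legalizeLoadStore).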
322   auto IsPtrVecPred = [=](const LegalityQuery &Query) {
323     const LLT &ValTy = Query.Types[0];
324     if (!ValTy.isVector())
325       return false;
326     const LLT EltTy = ValTy.getElementType();
327     return EltTy.isPointer() && EltTy.getAddressSpace() == 0;
328   };
329 
330   getActionDefinitionsBuilder(G_LOAD)
331       .customIf([=](const LegalityQuery &Query) {
332         return HasRCPC3 && Query.Types[0] == s128 &&
333                Query.MMODescrs[0].Ordering == AtomicOrdering::Acquire;
334       })
335       .customIf([=](const LegalityQuery &Query) {
336         return Query.Types[0] == s128 &&
337                Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
338       })
339       .legalForTypesWithMemDesc({{s8, p0, s8, 8},
340                                  {s16, p0, s16, 8},
341                                  {s32, p0, s32, 8},
342                                  {s64, p0, s64, 8},
343                                  {p0, p0, s64, 8},
344                                  {s128, p0, s128, 8},
345                                  {v8s8, p0, s64, 8},
346                                  {v16s8, p0, s128, 8},
347                                  {v4s16, p0, s64, 8},
348                                  {v8s16, p0, s128, 8},
349                                  {v2s32, p0, s64, 8},
350                                  {v4s32, p0, s128, 8},
351                                  {v2s64, p0, s128, 8}})
352       // These extends are also legal
353       .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}})
354       .widenScalarToNextPow2(0, /* MinSize = */ 8)
355       .lowerIfMemSizeNotByteSizePow2()
356       .clampScalar(0, s8, s64)
357       .narrowScalarIf(
358           [=](const LegalityQuery &Query) {
359             // Clamp extending load results to 32-bits.
360             return Query.Types[0].isScalar() &&
361                    Query.Types[0] != Query.MMODescrs[0].MemoryTy &&
362                    Query.Types[0].getSizeInBits() > 32;
363           },
364           changeTo(0, s32))
365       .clampMaxNumElements(0, s8, 16)
366       .clampMaxNumElements(0, s16, 8)
367       .clampMaxNumElements(0, s32, 4)
368       .clampMaxNumElements(0, s64, 2)
369       .clampMaxNumElements(0, p0, 2)
370       .customIf(IsPtrVecPred)
371       .scalarizeIf(typeIs(0, v2s16), 0);
372 
373   getActionDefinitionsBuilder(G_STORE)
374       .customIf([=](const LegalityQuery &Query) {
375         return HasRCPC3 && Query.Types[0] == s128 &&
376                Query.MMODescrs[0].Ordering == AtomicOrdering::Release;
377       })
378       .customIf([=](const LegalityQuery &Query) {
379         return Query.Types[0] == s128 &&
380                Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
381       })
382       .legalForTypesWithMemDesc(
383           {{s8, p0, s8, 8},     {s16, p0, s8, 8},  // truncstorei8 from s16
384            {s32, p0, s8, 8},                       // truncstorei8 from s32
385            {s64, p0, s8, 8},                       // truncstorei8 from s64
386            {s16, p0, s16, 8},   {s32, p0, s16, 8}, // truncstorei16 from s32
387            {s64, p0, s16, 8},                      // truncstorei16 from s64
388            {s32, p0, s8, 8},    {s32, p0, s16, 8},    {s32, p0, s32, 8},
389            {s64, p0, s64, 8},   {s64, p0, s32, 8}, // truncstorei32 from s64
390            {p0, p0, s64, 8},    {s128, p0, s128, 8},  {v16s8, p0, s128, 8},
391            {v8s8, p0, s64, 8},  {v4s16, p0, s64, 8},  {v8s16, p0, s128, 8},
392            {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}})
393       .clampScalar(0, s8, s64)
394       .lowerIf([=](const LegalityQuery &Query) {
395         return Query.Types[0].isScalar() &&
396                Query.Types[0] != Query.MMODescrs[0].MemoryTy;
397       })
398       // Maximum: sN * k = 128
399       .clampMaxNumElements(0, s8, 16)
400       .clampMaxNumElements(0, s16, 8)
401       .clampMaxNumElements(0, s32, 4)
402       .clampMaxNumElements(0, s64, 2)
403       .clampMaxNumElements(0, p0, 2)
404       .lowerIfMemSizeNotPow2()
405       .customIf(IsPtrVecPred)
406       .scalarizeIf(typeIs(0, v2s16), 0);
407 
408   // Constants
409   getActionDefinitionsBuilder(G_CONSTANT)
410       .legalFor({p0, s8, s16, s32, s64})
411       .widenScalarToNextPow2(0)
412       .clampScalar(0, s8, s64);
413   getActionDefinitionsBuilder(G_FCONSTANT)
414       .legalIf([=](const LegalityQuery &Query) {
415         const auto &Ty = Query.Types[0];
416         if (HasFP16 && Ty == s16)
417           return true;
418         return Ty == s32 || Ty == s64 || Ty == s128;
419       })
420       .clampScalar(0, MinFPScalar, s128);
421 
422   getActionDefinitionsBuilder(G_ICMP)
423       .legalFor({{s32, s32},
424                  {s32, s64},
425                  {s32, p0},
426                  {v4s32, v4s32},
427                  {v2s32, v2s32},
428                  {v2s64, v2s64},
429                  {v2s64, v2p0},
430                  {v4s16, v4s16},
431                  {v8s16, v8s16},
432                  {v8s8, v8s8},
433                  {v16s8, v16s8}})
434       .widenScalarOrEltToNextPow2(1)
435       .clampScalar(1, s32, s64)
436       .clampScalar(0, s32, s32)
437       .minScalarEltSameAsIf(
438           [=](const LegalityQuery &Query) {
439             const LLT &Ty = Query.Types[0];
440             const LLT &SrcTy = Query.Types[1];
441             return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
442                    Ty.getElementType() != SrcTy.getElementType();
443           },
444           0, 1)
445       .minScalarOrEltIf(
446           [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; },
447           1, s32)
448       .minScalarOrEltIf(
449           [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0,
450           s64)
451       .clampNumElements(0, v2s32, v4s32);
452 
453   getActionDefinitionsBuilder(G_FCMP)
454       // If we don't have full FP16 support, then scalarize the elements of
455       // vectors containing fp16 types.
456       .fewerElementsIf(
457           [=](const LegalityQuery &Query) {
458             const auto &Ty = Query.Types[0];
459             return Ty.isVector() && Ty.getElementType() == s16 && !HasFP16;
460           },
461           [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
462       // If we don't have full FP16 support, then widen s16 to s32 if we
463       // encounter it.
464       .widenScalarIf(
465           [=](const LegalityQuery &Query) {
466             return Query.Types[0] == s16 && !HasFP16;
467           },
468           [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
469       .legalFor({{s16, s16},
470                  {s32, s32},
471                  {s32, s64},
472                  {v4s32, v4s32},
473                  {v2s32, v2s32},
474                  {v2s64, v2s64},
475                  {v4s16, v4s16},
476                  {v8s16, v8s16}})
477       .widenScalarOrEltToNextPow2(1)
478       .clampScalar(1, s32, s64)
479       .clampScalar(0, s32, s32)
480       .minScalarEltSameAsIf(
481           [=](const LegalityQuery &Query) {
482             const LLT &Ty = Query.Types[0];
483             const LLT &SrcTy = Query.Types[1];
484             return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
485                    Ty.getElementType() != SrcTy.getElementType();
486           },
487           0, 1)
488       .clampNumElements(0, v2s32, v4s32);
489 
490   // Extensions
491   auto ExtLegalFunc = [=](const LegalityQuery &Query) {
492     unsigned DstSize = Query.Types[0].getSizeInBits();
493 
494     if (DstSize == 128 && !Query.Types[0].isVector())
495       return false; // Extending to a scalar s128 needs narrowing.
496 
497     // Make sure that we have something that will fit in a register, and
498     // make sure it's a power of 2.
499     if (DstSize < 8 || DstSize > 128 || !isPowerOf2_32(DstSize))
500       return false;
501 
502     const LLT &SrcTy = Query.Types[1];
503 
504     // Make sure we fit in a register otherwise. Don't bother checking that
505     // the source type is below 128 bits. We shouldn't be allowing anything
506     // through which is wider than the destination in the first place.
507     unsigned SrcSize = SrcTy.getSizeInBits();
508     if (SrcSize < 8 || !isPowerOf2_32(SrcSize))
509       return false;
510 
511     return true;
512   };
513   getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
514       .legalIf(ExtLegalFunc)
515       .clampScalar(0, s64, s64); // Just for s128, others are handled above.
516 
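  // Vector truncates: element types are first widened to at least s8, and
  // truncates from wide sources (e.g. v8s32 -> v8s8) are split up in
  // legalizeVectorTrunc; the rest is treated as legal.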
517   getActionDefinitionsBuilder(G_TRUNC)
518       .minScalarOrEltIf(
519           [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
520           0, s8)
521       .customIf([=](const LegalityQuery &Query) {
522         LLT DstTy = Query.Types[0];
523         LLT SrcTy = Query.Types[1];
524         return DstTy == v8s8 && SrcTy.getSizeInBits() > 128;
525       })
526       .alwaysLegal();
527 
528   getActionDefinitionsBuilder(G_SEXT_INREG)
529       .legalFor({s32, s64})
530       .legalFor(PackedVectorAllTypeList)
531       .lower();
532 
533   // FP conversions
534   getActionDefinitionsBuilder(G_FPTRUNC)
535       .legalFor(
536           {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
537       .clampNumElements(0, v4s16, v4s16)
538       .clampNumElements(0, v2s32, v2s32)
539       .scalarize(0);
540 
541   getActionDefinitionsBuilder(G_FPEXT)
542       .legalFor(
543           {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
544       .clampNumElements(0, v4s32, v4s32)
545       .clampNumElements(0, v2s64, v2s64)
546       .scalarize(0);
547 
548   // Conversions
549   getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
550       .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
551       .widenScalarToNextPow2(0)
552       .clampScalar(0, s32, s64)
553       .widenScalarToNextPow2(1)
554       .clampScalar(1, s32, s64);
555 
556   getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
557       .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
558       .clampScalar(1, s32, s64)
559       .minScalarSameAs(1, 0)
560       .clampScalar(0, s32, s64)
561       .widenScalarToNextPow2(0);
562 
563   // Control-flow
564   getActionDefinitionsBuilder(G_BRCOND)
565     .legalFor({s32})
566     .clampScalar(0, s32, s32);
567   getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
568 
569   getActionDefinitionsBuilder(G_SELECT)
570       .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
571       .widenScalarToNextPow2(0)
572       .clampScalar(0, s32, s64)
573       .clampScalar(1, s32, s32)
574       .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0)
575       .lowerIf(isVector(0));
576 
577   // Pointer-handling
578   getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
579 
580   if (TM.getCodeModel() == CodeModel::Small)
581     getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom();
582   else
583     getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
584 
585   getActionDefinitionsBuilder(G_PTRTOINT)
586       .legalFor({{s64, p0}, {v2s64, v2p0}})
587       .widenScalarToNextPow2(0, 64)
588       .clampScalar(0, s64, s64);
589 
590   getActionDefinitionsBuilder(G_INTTOPTR)
591       .unsupportedIf([&](const LegalityQuery &Query) {
592         return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
593       })
594       .legalFor({{p0, s64}, {v2p0, v2s64}});
595 
596   // Casts for 32 and 64-bit width types are just copies.
597   // Same for 128-bit width types, except they are on the FPR bank.
598   getActionDefinitionsBuilder(G_BITCAST)
599       // FIXME: This is wrong since G_BITCAST is not allowed to change the
600       // number of bits but it's what the previous code described and fixing
601       // it breaks tests.
602       .legalForCartesianProduct({s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
603                                  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64,
604                                  v2p0});
605 
606   getActionDefinitionsBuilder(G_VASTART).legalFor({p0});
607 
608   // va_list must be a pointer, but most sized types are pretty easy to handle
609   // as the destination.
610   getActionDefinitionsBuilder(G_VAARG)
611       .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
612       .clampScalar(0, s8, s64)
613       .widenScalarToNextPow2(0, /*Min*/ 8);
614 
615   getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
616       .lowerIf(
617           all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(2, p0)));
618 
619   getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
620       .customIf([](const LegalityQuery &Query) {
621         return Query.Types[0].getSizeInBits() == 128;
622       })
623       .clampScalar(0, s32, s64)
624       .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0)));
625 
626   getActionDefinitionsBuilder(
627       {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
628        G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
629        G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
630       .clampScalar(0, s32, s64)
631       .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0)));
632 
633   getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});
634 
635   // Merge/Unmerge
636   for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
637     unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
638     unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
639     getActionDefinitionsBuilder(Op)
640         .widenScalarToNextPow2(LitTyIdx, 8)
641         .widenScalarToNextPow2(BigTyIdx, 32)
642         .clampScalar(LitTyIdx, s8, s64)
643         .clampScalar(BigTyIdx, s32, s128)
644         .legalIf([=](const LegalityQuery &Q) {
645           switch (Q.Types[BigTyIdx].getSizeInBits()) {
646           case 32:
647           case 64:
648           case 128:
649             break;
650           default:
651             return false;
652           }
653           switch (Q.Types[LitTyIdx].getSizeInBits()) {
654           case 8:
655           case 16:
656           case 32:
657           case 64:
658             return true;
659           default:
660             return false;
661           }
662         });
663   }
664 
665   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
666       .unsupportedIf([=](const LegalityQuery &Query) {
667         const LLT &EltTy = Query.Types[1].getElementType();
668         return Query.Types[0] != EltTy;
669       })
670       .minScalar(2, s64)
671       .legalIf([=](const LegalityQuery &Query) {
672         const LLT &VecTy = Query.Types[1];
673         return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 ||
674                VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 ||
675                VecTy == v8s8 || VecTy == v16s8 || VecTy == v2s32 ||
676                VecTy == v2p0;
677       })
678       .minScalarOrEltIf(
679           [=](const LegalityQuery &Query) {
680             // We want to promote <M x s1> to <M x s64> if that wouldn't
681             // cause the total vec size to be > 128b.
682             return Query.Types[1].getNumElements() <= 2;
683           },
684           0, s64)
685       .minScalarOrEltIf(
686           [=](const LegalityQuery &Query) {
687             return Query.Types[1].getNumElements() <= 4;
688           },
689           0, s32)
690       .minScalarOrEltIf(
691           [=](const LegalityQuery &Query) {
692             return Query.Types[1].getNumElements() <= 8;
693           },
694           0, s16)
695       .minScalarOrEltIf(
696           [=](const LegalityQuery &Query) {
697             return Query.Types[1].getNumElements() <= 16;
698           },
699           0, s8)
700       .minScalarOrElt(0, s8) // Worst case, we need at least s8.
701       .clampMaxNumElements(1, s64, 2)
702       .clampMaxNumElements(1, s32, 4)
703       .clampMaxNumElements(1, s16, 8)
704       .clampMaxNumElements(1, p0, 2);
705 
706   getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
707       .legalIf(typeInSet(0, {v16s8, v8s8, v8s16, v4s16, v4s32, v2s32, v2s64}))
708       .clampMinNumElements(0, s16, 4)
709       .clampMaxNumElements(0, s16, 8);
710 
711   getActionDefinitionsBuilder(G_BUILD_VECTOR)
712       .legalFor({{v8s8, s8},
713                  {v16s8, s8},
714                  {v4s16, s16},
715                  {v8s16, s16},
716                  {v2s32, s32},
717                  {v4s32, s32},
718                  {v2p0, p0},
719                  {v2s64, s64}})
720       .clampNumElements(0, v4s32, v4s32)
721       .clampNumElements(0, v2s64, v2s64)
722       .minScalarOrElt(0, s8)
723       .widenVectorEltsToVectorMinSize(0, 64)
724       .minScalarSameAs(1, 0);
725 
726   getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower();
727 
728   getActionDefinitionsBuilder(G_CTLZ)
729       .legalForCartesianProduct(
730           {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
731       .scalarize(1)
732       .widenScalarToNextPow2(1, /*Min=*/32)
733       .clampScalar(1, s32, s64)
734       .scalarSameSizeAs(0, 1);
735   getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower();
736 
737   // TODO: Custom lowering for v2s32, v4s32, v2s64.
738   getActionDefinitionsBuilder(G_BITREVERSE)
739       .legalFor({s32, s64, v8s8, v16s8})
740       .widenScalarToNextPow2(0, /*Min = */ 32)
741       .clampScalar(0, s32, s64);
742 
743   getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower();
744 
745   getActionDefinitionsBuilder(G_CTTZ)
746       .lowerIf(isVector(0))
747       .widenScalarToNextPow2(1, /*Min=*/32)
748       .clampScalar(1, s32, s64)
749       .scalarSameSizeAs(0, 1)
750       .legalIf([=](const LegalityQuery &Query) {
751         return (HasCSSC && typeInSet(0, {s32, s64})(Query));
752       })
753       .customIf([=](const LegalityQuery &Query) {
754         return (!HasCSSC && typeInSet(0, {s32, s64})(Query));
755       });
756 
757   getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
758       .legalIf([=](const LegalityQuery &Query) {
759         const LLT &DstTy = Query.Types[0];
760         const LLT &SrcTy = Query.Types[1];
761         // For now just support the TBL2 variant which needs the source vectors
762         // to be the same size as the dest.
763         if (DstTy != SrcTy)
764           return false;
765         return llvm::is_contained({v2s32, v4s32, v2s64, v2p0, v16s8, v8s16},
766                                   DstTy);
767       })
768       // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors); we
769       // just want those lowered into G_BUILD_VECTOR.
770       .lowerIf([=](const LegalityQuery &Query) {
771         return !Query.Types[1].isVector();
772       })
773       .moreElementsIf(
774           [](const LegalityQuery &Query) {
775             return Query.Types[0].isVector() && Query.Types[1].isVector() &&
776                    Query.Types[0].getNumElements() >
777                        Query.Types[1].getNumElements();
778           },
779           changeTo(1, 0))
780       .moreElementsToNextPow2(0)
781       .clampNumElements(0, v4s32, v4s32)
782       .clampNumElements(0, v2s64, v2s64)
783       .moreElementsIf(
784           [](const LegalityQuery &Query) {
785             return Query.Types[0].isVector() && Query.Types[1].isVector() &&
786                    Query.Types[0].getNumElements() <
787                        Query.Types[1].getNumElements();
788           },
789           changeTo(0, 1));
790 
791   getActionDefinitionsBuilder(G_CONCAT_VECTORS)
792       .legalFor({{v4s32, v2s32}, {v8s16, v4s16}, {v16s8, v8s8}});
793 
794   getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}});
795 
796   getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) {
797     return Query.Types[0] == p0 && Query.Types[1] == s64;
798   });
799 
800   getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();
801 
802   if (ST.hasMOPS()) {
803     // G_BZERO is not supported. Currently it is only emitted by
804     // PreLegalizerCombiner for G_MEMSET with zero constant.
805     getActionDefinitionsBuilder(G_BZERO).unsupported();
806 
807     getActionDefinitionsBuilder(G_MEMSET)
808         .legalForCartesianProduct({p0}, {s64}, {s64})
809         .customForCartesianProduct({p0}, {s8}, {s64})
810         .immIdx(0); // Inform verifier imm idx 0 is handled.
811 
812     getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE})
813         .legalForCartesianProduct({p0}, {p0}, {s64})
814         .immIdx(0); // Inform verifier imm idx 0 is handled.
815 
816     // G_MEMCPY_INLINE does not have a tailcall immediate
817     getActionDefinitionsBuilder(G_MEMCPY_INLINE)
818         .legalForCartesianProduct({p0}, {p0}, {s64});
819 
820   } else {
821     getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET})
822         .libcall();
823   }
824 
825   // FIXME: Legal vector types are only legal with NEON.
826   auto &ABSActions = getActionDefinitionsBuilder(G_ABS);
827   if (HasCSSC)
828     ABSActions
829         .legalFor({s32, s64});
830   ABSActions
831       .legalFor(PackedVectorAllTypeList)
832       .lowerIf(isScalar(0));
833 
834   getActionDefinitionsBuilder(G_VECREDUCE_FADD)
835       // We only have FADDP to do reduction-like operations. Lower the rest.
836       .legalFor({{s32, v2s32}, {s64, v2s64}})
837       .clampMaxNumElements(1, s64, 2)
838       .clampMaxNumElements(1, s32, 2)
839       .lower();
840 
841   getActionDefinitionsBuilder(G_VECREDUCE_ADD)
842       .legalFor(
843           {{s8, v16s8}, {s16, v8s16}, {s32, v4s32}, {s32, v2s32}, {s64, v2s64}})
844       .clampMaxNumElements(1, s64, 2)
845       .clampMaxNumElements(1, s32, 4)
846       .lower();
847 
848   getActionDefinitionsBuilder(
849       {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
850       // Try to break down into smaller vectors as long as they're at least 64
851       // bits. This lets us use vector operations for some parts of the
852       // reduction.
853       .fewerElementsIf(
854           [=](const LegalityQuery &Q) {
855             LLT SrcTy = Q.Types[1];
856             if (SrcTy.isScalar())
857               return false;
858             if (!isPowerOf2_32(SrcTy.getNumElements()))
859               return false;
860             // We can usually perform 64b vector operations.
861             return SrcTy.getSizeInBits() > 64;
862           },
863           [=](const LegalityQuery &Q) {
864             LLT SrcTy = Q.Types[1];
865             return std::make_pair(1, SrcTy.divide(2));
866           })
867       .scalarize(1)
868       .lower();
869 
870   getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
871       .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });
872 
873   getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
874 
875   getActionDefinitionsBuilder(G_ROTR)
876       .legalFor({{s32, s64}, {s64, s64}})
877       .customIf([=](const LegalityQuery &Q) {
878         return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
879       })
880       .lower();
881   getActionDefinitionsBuilder(G_ROTL).lower();
882 
883   getActionDefinitionsBuilder({G_SBFX, G_UBFX})
884       .customFor({{s32, s32}, {s64, s64}});
885 
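  // Population count: scalar CTPOP is only legal with CSSC; the remaining
  // cases are expanded in legalizeCTPOP via the NEON CNT instruction.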
886   auto always = [=](const LegalityQuery &Q) { return true; };
887   auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
888   if (HasCSSC)
889     CTPOPActions
890         .legalFor({{s32, s32},
891                    {s64, s64},
892                    {v8s8, v8s8},
893                    {v16s8, v16s8}})
894         .customFor({{s128, s128},
895                     {v2s64, v2s64},
896                     {v2s32, v2s32},
897                     {v4s32, v4s32},
898                     {v4s16, v4s16},
899                     {v8s16, v8s16}});
900   else
901     CTPOPActions
902         .legalFor({{v8s8, v8s8},
903                    {v16s8, v16s8}})
904         .customFor({{s32, s32},
905                     {s64, s64},
906                     {s128, s128},
907                     {v2s64, v2s64},
908                     {v2s32, v2s32},
909                     {v4s32, v4s32},
910                     {v4s16, v4s16},
911                     {v8s16, v8s16}});
912   CTPOPActions
913       .clampScalar(0, s32, s128)
914       .widenScalarToNextPow2(0)
915       .minScalarEltSameAsIf(always, 1, 0)
916       .maxScalarEltSameAsIf(always, 1, 0);
917 
918   // TODO: Vector types.
919   getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT}).lowerIf(isScalar(0));
920 
921   // TODO: Vector types.
922   getActionDefinitionsBuilder({G_FMAXNUM, G_FMINNUM})
923       .legalFor({MinFPScalar, s32, s64})
924       .libcallFor({s128})
925       .minScalar(0, MinFPScalar);
926 
927   getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
928       .legalFor({MinFPScalar, s32, s64, v2s32, v4s32, v2s64})
929       .legalIf([=](const LegalityQuery &Query) {
930         const auto &Ty = Query.Types[0];
931         return (Ty == v8s16 || Ty == v4s16) && HasFP16;
932       })
933       .minScalar(0, MinFPScalar)
934       .clampNumElements(0, v4s16, v8s16)
935       .clampNumElements(0, v2s32, v4s32)
936       .clampNumElements(0, v2s64, v2s64);
937 
938   // TODO: Libcall support for s128.
939   // TODO: s16 should be legal with full FP16 support.
940   getActionDefinitionsBuilder({G_LROUND, G_LLROUND})
941       .legalFor({{s64, s32}, {s64, s64}});
942 
943   // TODO: Custom legalization for vector types.
944   // TODO: Custom legalization for mismatched types.
945   // TODO: s16 support.
946   getActionDefinitionsBuilder(G_FCOPYSIGN).customFor({{s32, s32}, {s64, s64}});
947 
948   getActionDefinitionsBuilder(G_FMAD).lower();
949 
950   getLegacyLegalizerInfo().computeTables();
951   verify(*ST.getInstrInfo());
952 }
953 
954 bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
955                                           MachineInstr &MI) const {
956   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
957   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
958   GISelChangeObserver &Observer = Helper.Observer;
959   switch (MI.getOpcode()) {
960   default:
961     // No idea what to do.
962     return false;
963   case TargetOpcode::G_VAARG:
964     return legalizeVaArg(MI, MRI, MIRBuilder);
965   case TargetOpcode::G_LOAD:
966   case TargetOpcode::G_STORE:
967     return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
968   case TargetOpcode::G_SHL:
969   case TargetOpcode::G_ASHR:
970   case TargetOpcode::G_LSHR:
971     return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
972   case TargetOpcode::G_GLOBAL_VALUE:
973     return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
974   case TargetOpcode::G_TRUNC:
975     return legalizeVectorTrunc(MI, Helper);
976   case TargetOpcode::G_SBFX:
977   case TargetOpcode::G_UBFX:
978     return legalizeBitfieldExtract(MI, MRI, Helper);
979   case TargetOpcode::G_ROTR:
980     return legalizeRotate(MI, MRI, Helper);
981   case TargetOpcode::G_CTPOP:
982     return legalizeCTPOP(MI, MRI, Helper);
983   case TargetOpcode::G_ATOMIC_CMPXCHG:
984     return legalizeAtomicCmpxchg128(MI, MRI, Helper);
985   case TargetOpcode::G_CTTZ:
986     return legalizeCTTZ(MI, Helper);
987   case TargetOpcode::G_BZERO:
988   case TargetOpcode::G_MEMCPY:
989   case TargetOpcode::G_MEMMOVE:
990   case TargetOpcode::G_MEMSET:
991     return legalizeMemOps(MI, Helper);
992   case TargetOpcode::G_FCOPYSIGN:
993     return legalizeFCopySign(MI, Helper);
994   }
995 
996   llvm_unreachable("expected switch to return");
997 }
998 
999 bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
1000                                           MachineRegisterInfo &MRI,
1001                                           LegalizerHelper &Helper) const {
1002   // To allow for imported patterns to match, we ensure that the rotate amount
1003   // is 64b with an extension.
1004   Register AmtReg = MI.getOperand(2).getReg();
1005   LLT AmtTy = MRI.getType(AmtReg);
1006   (void)AmtTy;
1007   assert(AmtTy.isScalar() && "Expected a scalar rotate");
1008   assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
1009   auto NewAmt = Helper.MIRBuilder.buildZExt(LLT::scalar(64), AmtReg);
1010   Helper.Observer.changingInstr(MI);
1011   MI.getOperand(2).setReg(NewAmt.getReg(0));
1012   Helper.Observer.changedInstr(MI);
1013   return true;
1014 }
1015 
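// Split \p Reg into \p NumParts registers of type \p Ty using a single
// G_UNMERGE_VALUES, appending the new registers to \p VRegs.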
1016 static void extractParts(Register Reg, MachineRegisterInfo &MRI,
1017                          MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts,
1018                          SmallVectorImpl<Register> &VRegs) {
1019   for (int I = 0; I < NumParts; ++I)
1020     VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
1021   MIRBuilder.buildUnmerge(VRegs, Reg);
1022 }
1023 
1024 bool AArch64LegalizerInfo::legalizeVectorTrunc(
1025     MachineInstr &MI, LegalizerHelper &Helper) const {
1026   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1027   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1028   // Similar to how operand splitting is done in SelectionDAG, we can handle
1029   // %res(v8s8) = G_TRUNC %in(v8s32) by generating:
1030   //   %inlo(<4x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>)
1031   //   %lo16(<4 x s16>) = G_TRUNC %inlo
1032   //   %hi16(<4 x s16>) = G_TRUNC %inhi
1033   //   %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16
1034   //   %res(<8 x s8>) = G_TRUNC %in16
1035 
1036   Register DstReg = MI.getOperand(0).getReg();
1037   Register SrcReg = MI.getOperand(1).getReg();
1038   LLT DstTy = MRI.getType(DstReg);
1039   LLT SrcTy = MRI.getType(SrcReg);
1040   assert(llvm::has_single_bit<uint32_t>(DstTy.getSizeInBits()) &&
1041          llvm::has_single_bit<uint32_t>(SrcTy.getSizeInBits()));
1042 
1043   // Split input type.
1044   LLT SplitSrcTy =
1045       SrcTy.changeElementCount(SrcTy.getElementCount().divideCoefficientBy(2));
1046   // First, split the source into two smaller vectors.
1047   SmallVector<Register, 2> SplitSrcs;
1048   extractParts(SrcReg, MRI, MIRBuilder, SplitSrcTy, 2, SplitSrcs);
1049 
1050   // Truncate the splits into intermediate narrower elements.
1051   LLT InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
1052   for (unsigned I = 0; I < SplitSrcs.size(); ++I)
1053     SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
1054 
1055   auto Concat = MIRBuilder.buildConcatVectors(
1056       DstTy.changeElementSize(DstTy.getScalarSizeInBits() * 2), SplitSrcs);
1057 
1058   Helper.Observer.changingInstr(MI);
1059   MI.getOperand(1).setReg(Concat.getReg(0));
1060   Helper.Observer.changedInstr(MI);
1061   return true;
1062 }
1063 
1064 bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
1065     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
1066     GISelChangeObserver &Observer) const {
1067   assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1068   // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
1069   // G_ADD_LOW instructions.
1070   // By splitting this here, we can optimize accesses in the small code model
1071   // by folding the G_ADD_LOW into the load/store offset.
1072   auto &GlobalOp = MI.getOperand(1);
1073   const auto* GV = GlobalOp.getGlobal();
1074   if (GV->isThreadLocal())
1075     return true; // Don't want to modify TLS vars.
1076 
1077   auto &TM = ST->getTargetLowering()->getTargetMachine();
1078   unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);
1079 
1080   if (OpFlags & AArch64II::MO_GOT)
1081     return true;
1082 
1083   auto Offset = GlobalOp.getOffset();
1084   Register DstReg = MI.getOperand(0).getReg();
1085   auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {})
1086                   .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE);
1087   // Set the regclass on the dest reg too.
1088   MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
1089 
1090   // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
1091   // by creating a MOVK that sets bits 48-63 of the register to (global address
1092   // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
1093   // prevent an incorrect tag being generated during relocation when the
1094   // global appears before the code section. Without the offset, a global at
1095   // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
1096   // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
1097   // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe`
1098   // instead of `0xf`.
1099   // This assumes that we're in the small code model so we can assume a binary
1100   // size of <= 4GB, which makes the untagged PC relative offset positive. The
1101   // binary must also be loaded into address range [0, 2^48). Both of these
1102   // properties need to be ensured at runtime when using tagged addresses.
1103   if (OpFlags & AArch64II::MO_TAGGED) {
1104     assert(!Offset &&
1105            "Should not have folded in an offset for a tagged global!");
1106     ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP})
1107                .addGlobalAddress(GV, 0x100000000,
1108                                  AArch64II::MO_PREL | AArch64II::MO_G3)
1109                .addImm(48);
1110     MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
1111   }
1112 
1113   MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
1114       .addGlobalAddress(GV, Offset,
1115                         OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1116   MI.eraseFromParent();
1117   return true;
1118 }
1119 
1120 bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
1121                                              MachineInstr &MI) const {
1122   switch (MI.getIntrinsicID()) {
1123   case Intrinsic::vacopy: {
1124     unsigned PtrSize = ST->isTargetILP32() ? 4 : 8;
1125     unsigned VaListSize =
1126       (ST->isTargetDarwin() || ST->isTargetWindows())
1127           ? PtrSize
1128           : ST->isTargetILP32() ? 20 : 32;
1129 
1130     MachineFunction &MF = *MI.getMF();
1131     auto Val = MF.getRegInfo().createGenericVirtualRegister(
1132         LLT::scalar(VaListSize * 8));
1133     MachineIRBuilder MIB(MI);
1134     MIB.buildLoad(Val, MI.getOperand(2),
1135                   *MF.getMachineMemOperand(MachinePointerInfo(),
1136                                            MachineMemOperand::MOLoad,
1137                                            VaListSize, Align(PtrSize)));
1138     MIB.buildStore(Val, MI.getOperand(1),
1139                    *MF.getMachineMemOperand(MachinePointerInfo(),
1140                                             MachineMemOperand::MOStore,
1141                                             VaListSize, Align(PtrSize)));
1142     MI.eraseFromParent();
1143     return true;
1144   }
1145   case Intrinsic::get_dynamic_area_offset: {
1146     MachineIRBuilder &MIB = Helper.MIRBuilder;
1147     MIB.buildConstant(MI.getOperand(0).getReg(), 0);
1148     MI.eraseFromParent();
1149     return true;
1150   }
1151   case Intrinsic::aarch64_mops_memset_tag: {
1152     assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1153     // Anyext the value to 64 bit
1154     MachineIRBuilder MIB(MI);
1155     auto &Value = MI.getOperand(3);
1156     Register ZExtValueReg = MIB.buildAnyExt(LLT::scalar(64), Value).getReg(0);
1157     Value.setReg(ZExtValueReg);
1158     return true;
1159   }
1160   case Intrinsic::prefetch: {
1161     MachineIRBuilder MIB(MI);
1162     auto &AddrVal = MI.getOperand(1);
1163 
1164     int64_t IsWrite = MI.getOperand(2).getImm();
1165     int64_t Locality = MI.getOperand(3).getImm();
1166     int64_t IsData = MI.getOperand(4).getImm();
1167 
1168     bool IsStream = Locality == 0;
1169     if (Locality != 0) {
1170       assert(Locality <= 3 && "Prefetch locality out-of-range");
1171       // The locality degree runs opposite to the cache-level encoding:
1172       // higher locality means a closer (faster) cache, so flip the value.
1173       // The encoding starts at 0 for level 1.
1174       Locality = 3 - Locality;
1175     }
1176 
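    // Assemble the PRFM operand: bit 4 = load/store, bit 3 = data/instruction
    // cache, bits 2-1 = cache level, bit 0 = stream/keep (same layout as the
    // aarch64_prefetch handling below).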
1177     unsigned PrfOp =
1178         (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream;
1179 
1180     MIB.buildInstr(AArch64::G_PREFETCH).addImm(PrfOp).add(AddrVal);
1181     MI.eraseFromParent();
1182     return true;
1183   }
1184   case Intrinsic::aarch64_prefetch: {
1185     MachineIRBuilder MIB(MI);
1186     auto &AddrVal = MI.getOperand(1);
1187 
1188     int64_t IsWrite = MI.getOperand(2).getImm();
1189     int64_t Target = MI.getOperand(3).getImm();
1190     int64_t IsStream = MI.getOperand(4).getImm();
1191     int64_t IsData = MI.getOperand(5).getImm();
1192 
1193     unsigned PrfOp = (IsWrite << 4) |    // Load/Store bit
1194                      (!IsData << 3) |    // IsDataCache bit
1195                      (Target << 1) |     // Cache level bits
1196                      (unsigned)IsStream; // Stream bit
1197 
1198     MIB.buildInstr(AArch64::G_PREFETCH).addImm(PrfOp).add(AddrVal);
1199     MI.eraseFromParent();
1200     return true;
1201   }
1202   }
1203 
1204   return true;
1205 }
1206 
1207 bool AArch64LegalizerInfo::legalizeShlAshrLshr(
1208     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
1209     GISelChangeObserver &Observer) const {
1210   assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
1211          MI.getOpcode() == TargetOpcode::G_LSHR ||
1212          MI.getOpcode() == TargetOpcode::G_SHL);
1213   // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
1214   // imported patterns can select it later. Either way, it will be legal.
1215   Register AmtReg = MI.getOperand(2).getReg();
1216   auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
1217   if (!VRegAndVal)
1218     return true;
1219   // Check the shift amount is in range for an immediate form.
1220   int64_t Amount = VRegAndVal->Value.getSExtValue();
1221   if (Amount > 31)
1222     return true; // This will have to remain a register variant.
1223   auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
1224   Observer.changingInstr(MI);
1225   MI.getOperand(2).setReg(ExtCst.getReg(0));
1226   Observer.changedInstr(MI);
1227   return true;
1228 }
1229 
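// If \p Root is a G_PTR_ADD of a base and a constant offset that fits the
// signed 7-bit, 8-byte-scaled LDP/STP immediate, return that base and offset;
// otherwise return \p Root itself with an offset of 0.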
1230 static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset,
1231                                 MachineRegisterInfo &MRI) {
1232   Base = Root;
1233   Offset = 0;
1234 
1235   Register NewBase;
1236   int64_t NewOffset;
1237   if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) &&
1238       isShiftedInt<7, 3>(NewOffset)) {
1239     Base = NewBase;
1240     Offset = NewOffset;
1241   }
1242 }
1243 
1244 // FIXME: This should be removed and replaced with the generic bitcast legalize
1245 // action.
1246 bool AArch64LegalizerInfo::legalizeLoadStore(
1247     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
1248     GISelChangeObserver &Observer) const {
1249   assert(MI.getOpcode() == TargetOpcode::G_STORE ||
1250          MI.getOpcode() == TargetOpcode::G_LOAD);
1251   // Here we just try to handle vector loads/stores where our value type might
1252   // have pointer elements, which the SelectionDAG importer can't handle. To
1253   // allow the existing patterns for s64 to fire for p0, we just try to bitcast
1254   // the value to use s64 types.
1255 
1256   // Custom legalization requires that the instruction, if not deleted, be
1257   // fully legalized. To allow further legalization of the inst, we create a
1258   // new instruction and erase the existing one.
1259 
1260   Register ValReg = MI.getOperand(0).getReg();
1261   const LLT ValTy = MRI.getType(ValReg);
1262 
1263   if (ValTy == LLT::scalar(128)) {
1264 
1265     AtomicOrdering Ordering = (*MI.memoperands_begin())->getSuccessOrdering();
1266     bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
1267     bool IsLoadAcquire = IsLoad && Ordering == AtomicOrdering::Acquire;
1268     bool IsStoreRelease = !IsLoad && Ordering == AtomicOrdering::Release;
1269     bool IsRcpC3 =
1270         ST->hasLSE2() && ST->hasRCPC3() && (IsLoadAcquire || IsStoreRelease);
1271 
1272     LLT s64 = LLT::scalar(64);
1273 
1274     unsigned Opcode;
1275     if (IsRcpC3) {
1276       Opcode = IsLoad ? AArch64::LDIAPPX : AArch64::STILPX;
1277     } else {
1278       // For LSE2, loads/stores should have been converted to monotonic and had
1279       // a fence inserted after them.
1280       assert(Ordering == AtomicOrdering::Monotonic ||
1281              Ordering == AtomicOrdering::Unordered);
1282       assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");
1283 
1284       Opcode = IsLoad ? AArch64::LDPXi : AArch64::STPXi;
1285     }
1286 
1287     MachineInstrBuilder NewI;
1288     if (IsLoad) {
1289       NewI = MIRBuilder.buildInstr(Opcode, {s64, s64}, {});
1290       MIRBuilder.buildMergeLikeInstr(
1291           ValReg, {NewI->getOperand(0), NewI->getOperand(1)});
1292     } else {
1293       auto Split = MIRBuilder.buildUnmerge(s64, MI.getOperand(0));
1294       NewI = MIRBuilder.buildInstr(
1295           Opcode, {}, {Split->getOperand(0), Split->getOperand(1)});
1296     }
1297 
1298     if (IsRcpC3) {
1299       NewI.addUse(MI.getOperand(1).getReg());
1300     } else {
1301       Register Base;
1302       int Offset;
1303       matchLDPSTPAddrMode(MI.getOperand(1).getReg(), Base, Offset, MRI);
1304       NewI.addUse(Base);
1305       NewI.addImm(Offset / 8);
1306     }
1307 
1308     NewI.cloneMemRefs(MI);
1309     constrainSelectedInstRegOperands(*NewI, *ST->getInstrInfo(),
1310                                      *MRI.getTargetRegisterInfo(),
1311                                      *ST->getRegBankInfo());
1312     MI.eraseFromParent();
1313     return true;
1314   }
1315 
1316   if (!ValTy.isVector() || !ValTy.getElementType().isPointer() ||
1317       ValTy.getElementType().getAddressSpace() != 0) {
1318     LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
1319     return false;
1320   }
1321 
1322   unsigned PtrSize = ValTy.getElementType().getSizeInBits();
1323   const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize);
1324   auto &MMO = **MI.memoperands_begin();
1325   MMO.setType(NewTy);
1326 
1327   if (MI.getOpcode() == TargetOpcode::G_STORE) {
1328     auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
1329     MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO);
1330   } else {
1331     auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
1332     MIRBuilder.buildBitcast(ValReg, NewLoad);
1333   }
1334   MI.eraseFromParent();
1335   return true;
1336 }
1337 
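// Lower G_VAARG by treating the va_list as a pointer to the next argument
// slot: load the current pointer, realign it if the requested alignment
// exceeds the pointer alignment, load the value, then advance the pointer
// and store it back.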
1338 bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
1339                                          MachineRegisterInfo &MRI,
1340                                          MachineIRBuilder &MIRBuilder) const {
1341   MachineFunction &MF = MIRBuilder.getMF();
1342   Align Alignment(MI.getOperand(2).getImm());
1343   Register Dst = MI.getOperand(0).getReg();
1344   Register ListPtr = MI.getOperand(1).getReg();
1345 
1346   LLT PtrTy = MRI.getType(ListPtr);
1347   LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
1348 
1349   const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
1350   const Align PtrAlign = Align(PtrSize);
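  // Load the current argument pointer out of the va_list.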
  auto List = MIRBuilder.buildLoad(
      PtrTy, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrTy, PtrAlign));

  MachineInstrBuilder DstPtr;
  if (Alignment > PtrAlign) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 =
        MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
    auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
    DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment));
  } else
    DstPtr = List;

  LLT ValTy = MRI.getType(Dst);
  uint64_t ValSize = ValTy.getSizeInBits() / 8;
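  // Load the argument value itself from the (possibly realigned) address.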
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValTy, std::max(Alignment, PtrAlign)));

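  // Advance the pointer past this argument, keeping it pointer-aligned, and
  // store the updated pointer back to the va_list.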
  auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign));

  auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));

  MIRBuilder.buildStore(NewList, ListPtr,
                        *MF.getMachineMemOperand(MachinePointerInfo(),
                                                 MachineMemOperand::MOStore,
                                                 PtrTy, PtrAlign));

  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeBitfieldExtract(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  // Only legal if we can select immediate forms.
  // TODO: Lower this otherwise.
  return getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
         getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
}

bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         LegalizerHelper &Helper) const {
  // When there is no integer popcount instruction (FEAT_CSSC isn't available),
  // it can be more efficiently lowered to the following sequence that uses
  // AdvSIMD registers/instructions as long as the copies to/from the AdvSIMD
  // registers are cheap.
  //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
  //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
  //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
  //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
  //
  // For 128 bit vector popcounts, we lower to the following sequence:
  //  cnt.16b   v0, v0  // v8s16, v4s32, v2s64
  //  uaddlp.8h v0, v0  // v8s16, v4s32, v2s64
  //  uaddlp.4s v0, v0  //        v4s32, v2s64
  //  uaddlp.2d v0, v0  //               v2s64
  //
  // For 64 bit vector popcounts, we lower to the following sequence:
  //  cnt.8b    v0, v0  // v4s16, v2s32
  //  uaddlp.4h v0, v0  // v4s16, v2s32
  //  uaddlp.2s v0, v0  //        v2s32

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register Dst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Val);
  unsigned Size = Ty.getSizeInBits();

  assert(Ty == MRI.getType(Dst) &&
         "Expected src and dst to have the same type!");

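  // With FEAT_CSSC, scalar G_CTPOP is legal, so an s128 popcount is simply
  // split into two s64 popcounts whose results are added together.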
  if (ST->hasCSSC() && Ty.isScalar() && Size == 128) {
    LLT s64 = LLT::scalar(64);

    auto Split = MIRBuilder.buildUnmerge(s64, Val);
    auto CTPOP1 = MIRBuilder.buildCTPOP(s64, Split->getOperand(0));
    auto CTPOP2 = MIRBuilder.buildCTPOP(s64, Split->getOperand(1));
    auto Add = MIRBuilder.buildAdd(s64, CTPOP1, CTPOP2);

    MIRBuilder.buildZExt(Dst, Add);
    MI.eraseFromParent();
    return true;
  }

  if (!ST->hasNEON() ||
      MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) {
    // Use generic lowering when custom lowering is not possible.
    return Ty.isScalar() && (Size == 32 || Size == 64) &&
           Helper.lowerBitCount(MI) ==
               LegalizerHelper::LegalizeResult::Legalized;
  }

  // Pre-conditioning: widen Val up to the nearest vector type.
  // s32,s64,v4s16,v2s32 -> v8s8
  // s128,v8s16,v4s32,v2s64 -> v16s8
  LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8);
  if (Ty.isScalar()) {
    assert((Size == 32 || Size == 64 || Size == 128) &&
           "Expected only 32, 64, or 128 bit scalars!");
    if (Size == 32) {
      Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
    }
  }
  Val = MIRBuilder.buildBitcast(VTy, Val).getReg(0);

  // Count bits in each byte-sized lane.
  auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val);

  // Sum across lanes.
  Register HSum = CTPOP.getReg(0);
  unsigned Opc;
  SmallVector<LLT> HAddTys;
  if (Ty.isScalar()) {
    Opc = Intrinsic::aarch64_neon_uaddlv;
    HAddTys.push_back(LLT::scalar(32));
  } else if (Ty == LLT::fixed_vector(8, 16)) {
    Opc = Intrinsic::aarch64_neon_uaddlp;
    HAddTys.push_back(LLT::fixed_vector(8, 16));
  } else if (Ty == LLT::fixed_vector(4, 32)) {
    Opc = Intrinsic::aarch64_neon_uaddlp;
    HAddTys.push_back(LLT::fixed_vector(8, 16));
    HAddTys.push_back(LLT::fixed_vector(4, 32));
  } else if (Ty == LLT::fixed_vector(2, 64)) {
    Opc = Intrinsic::aarch64_neon_uaddlp;
    HAddTys.push_back(LLT::fixed_vector(8, 16));
    HAddTys.push_back(LLT::fixed_vector(4, 32));
    HAddTys.push_back(LLT::fixed_vector(2, 64));
  } else if (Ty == LLT::fixed_vector(4, 16)) {
    Opc = Intrinsic::aarch64_neon_uaddlp;
    HAddTys.push_back(LLT::fixed_vector(4, 16));
  } else if (Ty == LLT::fixed_vector(2, 32)) {
    Opc = Intrinsic::aarch64_neon_uaddlp;
    HAddTys.push_back(LLT::fixed_vector(4, 16));
    HAddTys.push_back(LLT::fixed_vector(2, 32));
  } else
    llvm_unreachable("unexpected vector shape");
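  // Emit the chain of across-vector (uaddlv) or pairwise widening (uaddlp)
  // additions computed above.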
  MachineInstrBuilder UADD;
  for (LLT HTy : HAddTys) {
    UADD = MIRBuilder.buildIntrinsic(Opc, {HTy}, /*HasSideEffects =*/false)
               .addUse(HSum);
    HSum = UADD.getReg(0);
  }

  // Post-conditioning: the scalar uaddlv result is s32, so zero-extend it to
  // the wider 64/128-bit destination; otherwise the final addition already
  // produced a value of the destination type.
  if (Ty.isScalar() && (Size == 64 || Size == 128))
    MIRBuilder.buildZExt(Dst, UADD);
  else
    UADD->getOperand(0).setReg(Dst);
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
    MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  LLT s64 = LLT::scalar(64);
  auto Addr = MI.getOperand(1).getReg();
  auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
  auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
  auto DstLo = MRI.createGenericVirtualRegister(s64);
  auto DstHi = MRI.createGenericVirtualRegister(s64);

  MachineInstrBuilder CAS;
  if (ST->hasLSE()) {
    // We have 128-bit CASP instructions taking XSeqPair registers, which are
    // s128. We need the merge/unmerge to bracket the expansion and pair up with
    // the rest of the MIR so we must reassemble the extracted registers into a
    // 128-bit known-regclass one with code like this:
    //
    //     %in1 = REG_SEQUENCE Lo, Hi    ; One for each input
    //     %out = CASP %in1, ...
    //     %OldLo = G_EXTRACT %out, 0
    //     %OldHi = G_EXTRACT %out, 64
    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
    unsigned Opcode;
    switch (Ordering) {
    case AtomicOrdering::Acquire:
      Opcode = AArch64::CASPAX;
      break;
    case AtomicOrdering::Release:
      Opcode = AArch64::CASPLX;
      break;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      Opcode = AArch64::CASPALX;
      break;
    default:
      Opcode = AArch64::CASPX;
      break;
    }

    LLT s128 = LLT::scalar(128);
    auto CASDst = MRI.createGenericVirtualRegister(s128);
    auto CASDesired = MRI.createGenericVirtualRegister(s128);
    auto CASNew = MRI.createGenericVirtualRegister(s128);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
        .addUse(DesiredI->getOperand(0).getReg())
        .addImm(AArch64::sube64)
        .addUse(DesiredI->getOperand(1).getReg())
        .addImm(AArch64::subo64);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
        .addUse(NewI->getOperand(0).getReg())
        .addImm(AArch64::sube64)
        .addUse(NewI->getOperand(1).getReg())
        .addImm(AArch64::subo64);

    CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});

    MIRBuilder.buildExtract({DstLo}, {CASDst}, 0);
    MIRBuilder.buildExtract({DstHi}, {CASDst}, 64);
  } else {
    // The CMP_SWAP_128 pseudo (as used at -O0) is friendlier to generate code
    // for because LDXP/STXP can take arbitrary registers, so it just has the
    // normal GPR64 operands the rest of AArch64 is expecting.
    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
    unsigned Opcode;
    switch (Ordering) {
    case AtomicOrdering::Acquire:
      Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
      break;
    case AtomicOrdering::Release:
      Opcode = AArch64::CMP_SWAP_128_RELEASE;
      break;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      Opcode = AArch64::CMP_SWAP_128;
      break;
    default:
      Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
      break;
    }

    auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
                                {Addr, DesiredI->getOperand(0),
                                 DesiredI->getOperand(1), NewI->getOperand(0),
                                 NewI->getOperand(1)});
  }

  CAS.cloneMemRefs(MI);
  constrainSelectedInstRegOperands(*CAS, *ST->getInstrInfo(),
                                   *MRI.getTargetRegisterInfo(),
                                   *ST->getRegBankInfo());

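  // Reassemble the two 64-bit halves of the old value into the s128 result.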
  MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {DstLo, DstHi});
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI,
                                        LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
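  // AArch64 has no count-trailing-zeros instruction, but
  // cttz(x) == ctlz(bitreverse(x)), and RBIT and CLZ are both available.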
  auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1));
  MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
  MI.eraseFromParent();
  return true;
}

bool AArch64LegalizerInfo::legalizeMemOps(MachineInstr &MI,
                                          LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;

  // Tagged version MOPSMemorySetTagged is legalized in legalizeIntrinsic.
  if (MI.getOpcode() == TargetOpcode::G_MEMSET) {
    // Anyext the value operand to 64 bits; only the bottom 8 bits are
    // meaningful for a memset.
    auto &Value = MI.getOperand(1);
    Register ExtValueReg =
        MIRBuilder.buildAnyExt(LLT::scalar(64), Value).getReg(0);
    Value.setReg(ExtValueReg);
    return true;
  }

  return false;
}

bool AArch64LegalizerInfo::legalizeFCopySign(MachineInstr &MI,
                                             LegalizerHelper &Helper) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  assert(DstTy.isScalar() && "Only expected scalars right now!");
  const unsigned DstSize = DstTy.getSizeInBits();
  assert((DstSize == 32 || DstSize == 64) && "Unexpected dst type!");
  assert(MRI.getType(MI.getOperand(2).getReg()) == DstTy &&
         "Expected homogeneous types!");

  // We want to materialize a mask with the high bit set.
  uint64_t EltMask;
  LLT VecTy;

  // TODO: s16 support.
  switch (DstSize) {
  default:
    llvm_unreachable("Unexpected type for G_FCOPYSIGN!");
  case 64: {
    // AdvSIMD immediate moves cannot materialize our mask in a single
    // instruction for 64-bit elements. Instead, materialize zero and then
    // negate it.
    EltMask = 0;
    VecTy = LLT::fixed_vector(2, DstTy);
    break;
  }
  case 32:
    EltMask = 0x80000000ULL;
    VecTy = LLT::fixed_vector(4, DstTy);
    break;
  }

  // Widen In1 and In2 to 128 bits. We want these to eventually become
  // INSERT_SUBREGs.
  auto Undef = MIRBuilder.buildUndef(VecTy);
  auto Zero = MIRBuilder.buildConstant(DstTy, 0);
  auto Ins1 = MIRBuilder.buildInsertVectorElement(
      VecTy, Undef, MI.getOperand(1).getReg(), Zero);
  auto Ins2 = MIRBuilder.buildInsertVectorElement(
      VecTy, Undef, MI.getOperand(2).getReg(), Zero);

  // Construct the mask.
  auto Mask = MIRBuilder.buildConstant(VecTy, EltMask);
  if (DstSize == 64)
    Mask = MIRBuilder.buildFNeg(VecTy, Mask);

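  // G_BIT takes each bit from Ins2 where the mask bit is set and from Ins1
  // otherwise, so only the sign bit is copied from the sign operand.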
  auto Sel = MIRBuilder.buildInstr(AArch64::G_BIT, {VecTy}, {Ins1, Ins2, Mask});

  // Build an unmerge whose 0th elt is the original G_FCOPYSIGN destination. We
  // want this to eventually become an EXTRACT_SUBREG.
  SmallVector<Register, 2> DstRegs(1, Dst);
  for (unsigned I = 1, E = VecTy.getNumElements(); I < E; ++I)
    DstRegs.push_back(MRI.createGenericVirtualRegister(DstTy));
  MIRBuilder.buildUnmerge(DstRegs, Sel);
  MI.eraseFromParent();
  return true;
}
