1 //===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a printer that converts from our internal representation
10 // of machine-dependent LLVM code to the AArch64 assembly language.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64.h"
15 #include "AArch64MCInstLower.h"
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetObjectFile.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "MCTargetDesc/AArch64InstPrinter.h"
22 #include "MCTargetDesc/AArch64MCExpr.h"
23 #include "MCTargetDesc/AArch64MCTargetDesc.h"
24 #include "MCTargetDesc/AArch64TargetStreamer.h"
25 #include "TargetInfo/AArch64TargetInfo.h"
26 #include "Utils/AArch64BaseInfo.h"
27 #include "llvm/ADT/SmallString.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/Twine.h"
31 #include "llvm/BinaryFormat/COFF.h"
32 #include "llvm/BinaryFormat/ELF.h"
33 #include "llvm/CodeGen/AsmPrinter.h"
34 #include "llvm/CodeGen/FaultMaps.h"
35 #include "llvm/CodeGen/MachineBasicBlock.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineInstr.h"
38 #include "llvm/CodeGen/MachineJumpTableInfo.h"
39 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
40 #include "llvm/CodeGen/MachineOperand.h"
41 #include "llvm/CodeGen/StackMaps.h"
42 #include "llvm/CodeGen/TargetRegisterInfo.h"
43 #include "llvm/IR/DataLayout.h"
44 #include "llvm/IR/DebugInfoMetadata.h"
45 #include "llvm/MC/MCAsmInfo.h"
46 #include "llvm/MC/MCContext.h"
47 #include "llvm/MC/MCInst.h"
48 #include "llvm/MC/MCInstBuilder.h"
49 #include "llvm/MC/MCSectionELF.h"
50 #include "llvm/MC/MCStreamer.h"
51 #include "llvm/MC/MCSymbol.h"
52 #include "llvm/MC/TargetRegistry.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/Target/TargetMachine.h"
57 #include "llvm/TargetParser/Triple.h"
58 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdint>
62 #include <map>
63 #include <memory>
64 
65 using namespace llvm;
66 
67 #define DEBUG_TYPE "asm-printer"
68 
69 namespace {
70 
71 class AArch64AsmPrinter : public AsmPrinter {
72   AArch64MCInstLower MCInstLowering;
73   FaultMaps FM;
74   const AArch64Subtarget *STI;
75   bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
76 
77 public:
78   AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
79       : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
80         FM(*this) {}
81 
82   StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
83 
84   /// Wrapper for MCInstLowering.lowerOperand() for the
85   /// tblgen'erated pseudo lowering.
86   bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
87     return MCInstLowering.lowerOperand(MO, MCOp);
88   }
89 
90   void emitStartOfAsmFile(Module &M) override;
91   void emitJumpTableInfo() override;
92 
93   void emitFunctionEntryLabel() override;
94 
95   void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
96 
97   void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
98 
99   void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
100                      const MachineInstr &MI);
101   void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
102                        const MachineInstr &MI);
103   void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
104                        const MachineInstr &MI);
105   void LowerFAULTING_OP(const MachineInstr &MI);
106 
107   void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
108   void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
109   void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
110   void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
111 
112   typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
113   std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
114   void LowerKCFI_CHECK(const MachineInstr &MI);
115   void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
116   void emitHwasanMemaccessSymbols(Module &M);
117 
118   void emitSled(const MachineInstr &MI, SledKind Kind);
119 
120   /// tblgen'erated driver function for lowering simple MI->MC
121   /// pseudo instructions.
122   bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
123                                    const MachineInstr *MI);
124 
125   void emitInstruction(const MachineInstr *MI) override;
126 
127   void emitFunctionHeaderComment() override;
128 
129   void getAnalysisUsage(AnalysisUsage &AU) const override {
130     AsmPrinter::getAnalysisUsage(AU);
131     AU.setPreservesAll();
132   }
133 
134   bool runOnMachineFunction(MachineFunction &MF) override {
135     AArch64FI = MF.getInfo<AArch64FunctionInfo>();
136     STI = &MF.getSubtarget<AArch64Subtarget>();
137 
138     SetupMachineFunction(MF);
139 
140     if (STI->isTargetCOFF()) {
141       bool Internal = MF.getFunction().hasInternalLinkage();
142       COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
143                                               : COFF::IMAGE_SYM_CLASS_EXTERNAL;
144       int Type =
145         COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
146 
147       OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
148       OutStreamer->emitCOFFSymbolStorageClass(Scl);
149       OutStreamer->emitCOFFSymbolType(Type);
150       OutStreamer->endCOFFSymbolDef();
151     }
152 
153     // Emit the rest of the function body.
154     emitFunctionBody();
155 
156     // Emit the XRay table for this function.
157     emitXRayTable();
158 
159     // We didn't modify anything.
160     return false;
161   }
162 
163 private:
164   void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
165   bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
166   bool printAsmRegInClass(const MachineOperand &MO,
167                           const TargetRegisterClass *RC, unsigned AltName,
168                           raw_ostream &O);
169 
170   bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
171                        const char *ExtraCode, raw_ostream &O) override;
172   bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
173                              const char *ExtraCode, raw_ostream &O) override;
174 
175   void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
176 
177   void emitFunctionBodyEnd() override;
178 
179   MCSymbol *GetCPISymbol(unsigned CPID) const override;
180   void emitEndOfAsmFile(Module &M) override;
181 
182   AArch64FunctionInfo *AArch64FI = nullptr;
183 
184   /// Emit the LOHs contained in AArch64FI.
185   void emitLOHs();
186 
187   /// Emit instruction to set float register to zero.
188   void emitFMov0(const MachineInstr &MI);
189 
190   using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
191 
192   MInstToMCSymbol LOHInstToLabel;
193 
194   bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
195     return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
196   }
197 };
198 
199 } // end anonymous namespace
200 
201 void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
202   const Triple &TT = TM.getTargetTriple();
203 
204   if (TT.isOSBinFormatCOFF()) {
205     // Emit an absolute @feat.00 symbol
206     MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
207     OutStreamer->beginCOFFSymbolDef(S);
208     OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
209     OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
210     OutStreamer->endCOFFSymbolDef();
211     int64_t Feat00Value = 0;
212 
213     if (M.getModuleFlag("cfguard")) {
214       // Object is CFG-aware.
215       Feat00Value |= COFF::Feat00Flags::GuardCF;
216     }
217 
218     if (M.getModuleFlag("ehcontguard")) {
219       // Object also has EHCont.
220       Feat00Value |= COFF::Feat00Flags::GuardEHCont;
221     }
222 
223     if (M.getModuleFlag("ms-kernel")) {
224       // Object is compiled with /kernel.
225       Feat00Value |= COFF::Feat00Flags::Kernel;
226     }
227 
228     OutStreamer->emitSymbolAttribute(S, MCSA_Global);
229     OutStreamer->emitAssignment(
230         S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
231   }
232 
233   if (!TT.isOSBinFormatELF())
234     return;
235 
236   // Assemble feature flags that may require creation of a note section.
237   unsigned Flags = 0;
238   if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
239           M.getModuleFlag("branch-target-enforcement")))
240     if (BTE->getZExtValue())
241       Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
242 
243   if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
244           M.getModuleFlag("sign-return-address")))
245     if (Sign->getZExtValue())
246       Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
247 
248   if (Flags == 0)
249     return;
250 
251   // Emit a .note.gnu.property section with the flags.
252   auto *TS =
253       static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
254   TS->emitNoteSection(Flags);
255 }
256 
257 void AArch64AsmPrinter::emitFunctionHeaderComment() {
258   const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
259   std::optional<std::string> OutlinerString = FI->getOutliningStyle();
260   if (OutlinerString != std::nullopt)
261     OutStreamer->getCommentOS() << ' ' << OutlinerString;
262 }
263 
264 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
265 {
266   const Function &F = MF->getFunction();
267   if (F.hasFnAttribute("patchable-function-entry")) {
268     unsigned Num;
269     if (F.getFnAttribute("patchable-function-entry")
270             .getValueAsString()
271             .getAsInteger(10, Num))
272       return;
273     emitNops(Num);
274     return;
275   }
276 
277   emitSled(MI, SledKind::FUNCTION_ENTER);
278 }
279 
280 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
281   emitSled(MI, SledKind::FUNCTION_EXIT);
282 }
283 
284 void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
285   emitSled(MI, SledKind::TAIL_CALL);
286 }
287 
288 void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
289   static const int8_t NoopsInSledCount = 7;
290   // We want to emit the following pattern:
291   //
292   // .Lxray_sled_N:
293   //   ALIGN
294   //   B #32
295   //   ; 7 NOP instructions (28 bytes)
296   // .tmpN
297   //
298   // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
299   // over the full 32 bytes (8 instructions) with the following pattern:
300   //
301   //   STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
302   //   LDR W17, #12 ; W17 := function ID
303   //   LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
304   //   BLR X16 ; call the tracing trampoline
305   //   ;DATA: 32 bits of function ID
306   //   ;DATA: lower 32 bits of the address of the trampoline
307   //   ;DATA: higher 32 bits of the address of the trampoline
308   //   LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
309   //
310   OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
311   auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
312   OutStreamer->emitLabel(CurSled);
313   auto Target = OutContext.createTempSymbol();
314 
315   // Emit "B #32" instruction, which jumps over the next 28 bytes.
316   // The operand has to be the number of 4-byte instructions to jump over,
317   // including the current instruction.
318   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
319 
320   for (int8_t I = 0; I < NoopsInSledCount; I++)
321     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
322 
323   OutStreamer->emitLabel(Target);
324   recordSled(CurSled, MI, Kind, 2);
325 }
326 
327 // Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
328 // (built-in functions __xray_customevent/__xray_typedevent).
329 //
330 // .Lxray_event_sled_N:
331 //   b 1f
332 //   save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
333 //   set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
334 //   bl __xray_CustomEvent or __xray_TypedEvent
335 //   restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
336 // 1:
337 //
338 // There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
339 //
340 // Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
341 // After patching, b .+N will become a nop.
342 void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
343                                                   bool Typed) {
344   auto &O = *OutStreamer;
345   MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
346   O.emitLabel(CurSled);
347   MCInst MovX0Op0 = MCInstBuilder(AArch64::ORRXrs)
348                         .addReg(AArch64::X0)
349                         .addReg(AArch64::XZR)
350                         .addReg(MI.getOperand(0).getReg())
351                         .addImm(0);
352   MCInst MovX1Op1 = MCInstBuilder(AArch64::ORRXrs)
353                         .addReg(AArch64::X1)
354                         .addReg(AArch64::XZR)
355                         .addReg(MI.getOperand(1).getReg())
356                         .addImm(0);
357   bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
358   auto *Sym = MCSymbolRefExpr::create(
359       OutContext.getOrCreateSymbol(
360           Twine(MachO ? "_" : "") +
361           (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
362       OutContext);
363   if (Typed) {
364     O.AddComment("Begin XRay typed event");
365     EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
366     EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
367                           .addReg(AArch64::SP)
368                           .addReg(AArch64::X0)
369                           .addReg(AArch64::X1)
370                           .addReg(AArch64::SP)
371                           .addImm(-4));
372     EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
373                           .addReg(AArch64::X2)
374                           .addReg(AArch64::SP)
375                           .addImm(2));
376     EmitToStreamer(O, MovX0Op0);
377     EmitToStreamer(O, MovX1Op1);
378     EmitToStreamer(O, MCInstBuilder(AArch64::ORRXrs)
379                           .addReg(AArch64::X2)
380                           .addReg(AArch64::XZR)
381                           .addReg(MI.getOperand(2).getReg())
382                           .addImm(0));
383     EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
384     EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
385                           .addReg(AArch64::X2)
386                           .addReg(AArch64::SP)
387                           .addImm(2));
388     O.AddComment("End XRay typed event");
389     EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
390                           .addReg(AArch64::SP)
391                           .addReg(AArch64::X0)
392                           .addReg(AArch64::X1)
393                           .addReg(AArch64::SP)
394                           .addImm(4));
395 
396     recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
397   } else {
398     O.AddComment("Begin XRay custom event");
399     EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
400     EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
401                           .addReg(AArch64::SP)
402                           .addReg(AArch64::X0)
403                           .addReg(AArch64::X1)
404                           .addReg(AArch64::SP)
405                           .addImm(-2));
406     EmitToStreamer(O, MovX0Op0);
407     EmitToStreamer(O, MovX1Op1);
408     EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
409     O.AddComment("End XRay custom event");
410     EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
411                           .addReg(AArch64::SP)
412                           .addReg(AArch64::X0)
413                           .addReg(AArch64::X1)
414                           .addReg(AArch64::SP)
415                           .addImm(2));
416 
417     recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
418   }
419 }
420 
421 void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
422   Register AddrReg = MI.getOperand(0).getReg();
423   assert(std::next(MI.getIterator())->isCall() &&
424          "KCFI_CHECK not followed by a call instruction");
425   assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
426          "KCFI_CHECK call target doesn't match call operand");
427 
428   // Default to using the intra-procedure-call temporary registers for
429   // comparing the hashes.
430   unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
431   if (AddrReg == AArch64::XZR) {
432     // Checking XZR makes no sense. Instead of emitting a load, zero
433     // ScratchRegs[0] and use it for the ESR AddrIndex below.
434     AddrReg = getXRegFromWReg(ScratchRegs[0]);
435     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
436                                      .addReg(AddrReg)
437                                      .addReg(AArch64::XZR)
438                                      .addReg(AArch64::XZR)
439                                      .addImm(0));
440   } else {
441     // If one of the scratch registers is used for the call target (e.g.
442     // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
443     // temporary register instead (in this case, AArch64::W9) as the check
444     // is immediately followed by the call instruction.
445     for (auto &Reg : ScratchRegs) {
446       if (Reg == getWRegFromXReg(AddrReg)) {
447         Reg = AArch64::W9;
448         break;
449       }
450     }
451     assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
452            "Invalid scratch registers for KCFI_CHECK");
453 
454     // Adjust the offset for patchable-function-prefix. This assumes that
455     // patchable-function-prefix is the same for all functions.
456     int64_t PrefixNops = 0;
457     (void)MI.getMF()
458         ->getFunction()
459         .getFnAttribute("patchable-function-prefix")
460         .getValueAsString()
461         .getAsInteger(10, PrefixNops);
462 
463     // Load the target function type hash.
464     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
465                                      .addReg(ScratchRegs[0])
466                                      .addReg(AddrReg)
467                                      .addImm(-(PrefixNops * 4 + 4)));
468   }
469 
470   // Load the expected type hash.
471   const int64_t Type = MI.getOperand(1).getImm();
472   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
473                                    .addReg(ScratchRegs[1])
474                                    .addReg(ScratchRegs[1])
475                                    .addImm(Type & 0xFFFF)
476                                    .addImm(0));
477   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
478                                    .addReg(ScratchRegs[1])
479                                    .addReg(ScratchRegs[1])
480                                    .addImm((Type >> 16) & 0xFFFF)
481                                    .addImm(16));
482 
483   // Compare the hashes and trap if there's a mismatch.
484   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
485                                    .addReg(AArch64::WZR)
486                                    .addReg(ScratchRegs[0])
487                                    .addReg(ScratchRegs[1])
488                                    .addImm(0));
489 
490   MCSymbol *Pass = OutContext.createTempSymbol();
491   EmitToStreamer(*OutStreamer,
492                  MCInstBuilder(AArch64::Bcc)
493                      .addImm(AArch64CC::EQ)
494                      .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
495 
496   // The base ESR is 0x8000 and the register information is encoded in bits
497   // 0-9 as follows:
498   // - 0-4: n, where the register Xn contains the target address
499   // - 5-9: m, where the register Wm contains the expected type hash
500   // Where n, m are in [0, 30].
501   unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
502   unsigned AddrIndex;
503   switch (AddrReg) {
504   default:
505     AddrIndex = AddrReg - AArch64::X0;
506     break;
507   case AArch64::FP:
508     AddrIndex = 29;
509     break;
510   case AArch64::LR:
511     AddrIndex = 30;
512     break;
513   }
514 
515   assert(AddrIndex < 31 && TypeIndex < 31);
516 
517   unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
518   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
519   OutStreamer->emitLabel(Pass);
520 }
521 
522 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
523   Register Reg = MI.getOperand(0).getReg();
524   bool IsShort =
525       MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
526   uint32_t AccessInfo = MI.getOperand(1).getImm();
527   MCSymbol *&Sym =
528       HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
529   if (!Sym) {
530     // FIXME: Make this work on non-ELF.
531     if (!TM.getTargetTriple().isOSBinFormatELF())
532       report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
533 
534     std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
535                           utostr(AccessInfo);
536     if (IsShort)
537       SymName += "_short_v2";
538     Sym = OutContext.getOrCreateSymbol(SymName);
539   }
540 
541   EmitToStreamer(*OutStreamer,
542                  MCInstBuilder(AArch64::BL)
543                      .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
544 }
545 
546 void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
547   if (HwasanMemaccessSymbols.empty())
548     return;
549 
550   const Triple &TT = TM.getTargetTriple();
551   assert(TT.isOSBinFormatELF());
552   std::unique_ptr<MCSubtargetInfo> STI(
553       TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
554   assert(STI && "Unable to create subtarget info");
555 
556   MCSymbol *HwasanTagMismatchV1Sym =
557       OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
558   MCSymbol *HwasanTagMismatchV2Sym =
559       OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
560 
561   const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
562       MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
563   const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
564       MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
565 
566   for (auto &P : HwasanMemaccessSymbols) {
567     unsigned Reg = std::get<0>(P.first);
568     bool IsShort = std::get<1>(P.first);
569     uint32_t AccessInfo = std::get<2>(P.first);
570     const MCSymbolRefExpr *HwasanTagMismatchRef =
571         IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
572     MCSymbol *Sym = P.second;
573 
574     bool HasMatchAllTag =
575         (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
576     uint8_t MatchAllTag =
577         (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
578     unsigned Size =
579         1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
580     bool CompileKernel =
581         (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
582 
583     OutStreamer->switchSection(OutContext.getELFSection(
584         ".text.hot", ELF::SHT_PROGBITS,
585         ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
586         /*IsComdat=*/true));
587 
588     OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
589     OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
590     OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
591     OutStreamer->emitLabel(Sym);
592 
593     OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
594                                      .addReg(AArch64::X16)
595                                      .addReg(Reg)
596                                      .addImm(4)
597                                      .addImm(55),
598                                  *STI);
599     OutStreamer->emitInstruction(
600         MCInstBuilder(AArch64::LDRBBroX)
601             .addReg(AArch64::W16)
602             .addReg(IsShort ? AArch64::X20 : AArch64::X9)
603             .addReg(AArch64::X16)
604             .addImm(0)
605             .addImm(0),
606         *STI);
607     OutStreamer->emitInstruction(
608         MCInstBuilder(AArch64::SUBSXrs)
609             .addReg(AArch64::XZR)
610             .addReg(AArch64::X16)
611             .addReg(Reg)
612             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
613         *STI);
614     MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
615     OutStreamer->emitInstruction(
616         MCInstBuilder(AArch64::Bcc)
617             .addImm(AArch64CC::NE)
618             .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
619                                              OutContext)),
620         *STI);
621     MCSymbol *ReturnSym = OutContext.createTempSymbol();
622     OutStreamer->emitLabel(ReturnSym);
623     OutStreamer->emitInstruction(
624         MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
625     OutStreamer->emitLabel(HandleMismatchOrPartialSym);
626 
627     if (HasMatchAllTag) {
628       OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
629                                        .addReg(AArch64::X17)
630                                        .addReg(Reg)
631                                        .addImm(56)
632                                        .addImm(63),
633                                    *STI);
634       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
635                                        .addReg(AArch64::XZR)
636                                        .addReg(AArch64::X17)
637                                        .addImm(MatchAllTag)
638                                        .addImm(0),
639                                    *STI);
640       OutStreamer->emitInstruction(
641           MCInstBuilder(AArch64::Bcc)
642               .addImm(AArch64CC::EQ)
643               .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
644           *STI);
645     }
646 
647     if (IsShort) {
648       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
649                                        .addReg(AArch64::WZR)
650                                        .addReg(AArch64::W16)
651                                        .addImm(15)
652                                        .addImm(0),
653                                    *STI);
654       MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
655       OutStreamer->emitInstruction(
656           MCInstBuilder(AArch64::Bcc)
657               .addImm(AArch64CC::HI)
658               .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
659           *STI);
660 
661       OutStreamer->emitInstruction(
662           MCInstBuilder(AArch64::ANDXri)
663               .addReg(AArch64::X17)
664               .addReg(Reg)
665               .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
666           *STI);
667       if (Size != 1)
668         OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
669                                          .addReg(AArch64::X17)
670                                          .addReg(AArch64::X17)
671                                          .addImm(Size - 1)
672                                          .addImm(0),
673                                      *STI);
674       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
675                                        .addReg(AArch64::WZR)
676                                        .addReg(AArch64::W16)
677                                        .addReg(AArch64::W17)
678                                        .addImm(0),
679                                    *STI);
680       OutStreamer->emitInstruction(
681           MCInstBuilder(AArch64::Bcc)
682               .addImm(AArch64CC::LS)
683               .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
684           *STI);
685 
686       OutStreamer->emitInstruction(
687           MCInstBuilder(AArch64::ORRXri)
688               .addReg(AArch64::X16)
689               .addReg(Reg)
690               .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
691           *STI);
692       OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
693                                        .addReg(AArch64::W16)
694                                        .addReg(AArch64::X16)
695                                        .addImm(0),
696                                    *STI);
697       OutStreamer->emitInstruction(
698           MCInstBuilder(AArch64::SUBSXrs)
699               .addReg(AArch64::XZR)
700               .addReg(AArch64::X16)
701               .addReg(Reg)
702               .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
703           *STI);
704       OutStreamer->emitInstruction(
705           MCInstBuilder(AArch64::Bcc)
706               .addImm(AArch64CC::EQ)
707               .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
708           *STI);
709 
710       OutStreamer->emitLabel(HandleMismatchSym);
711     }
712 
713     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
714                                      .addReg(AArch64::SP)
715                                      .addReg(AArch64::X0)
716                                      .addReg(AArch64::X1)
717                                      .addReg(AArch64::SP)
718                                      .addImm(-32),
719                                  *STI);
720     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
721                                      .addReg(AArch64::FP)
722                                      .addReg(AArch64::LR)
723                                      .addReg(AArch64::SP)
724                                      .addImm(29),
725                                  *STI);
726 
727     if (Reg != AArch64::X0)
728       OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
729                                        .addReg(AArch64::X0)
730                                        .addReg(AArch64::XZR)
731                                        .addReg(Reg)
732                                        .addImm(0),
733                                    *STI);
734     OutStreamer->emitInstruction(
735         MCInstBuilder(AArch64::MOVZXi)
736             .addReg(AArch64::X1)
737             .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
738             .addImm(0),
739         *STI);
740 
741     if (CompileKernel) {
742       // The Linux kernel's dynamic loader doesn't support GOT relative
743       // relocations, but it doesn't support late binding either, so just call
744       // the function directly.
745       OutStreamer->emitInstruction(
746           MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
747     } else {
748       // Intentionally load the GOT entry and branch to it, rather than possibly
749       // late binding the function, which may clobber the registers before we
750       // have a chance to save them.
751       OutStreamer->emitInstruction(
752           MCInstBuilder(AArch64::ADRP)
753               .addReg(AArch64::X16)
754               .addExpr(AArch64MCExpr::create(
755                   HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
756                   OutContext)),
757           *STI);
758       OutStreamer->emitInstruction(
759           MCInstBuilder(AArch64::LDRXui)
760               .addReg(AArch64::X16)
761               .addReg(AArch64::X16)
762               .addExpr(AArch64MCExpr::create(
763                   HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
764                   OutContext)),
765           *STI);
766       OutStreamer->emitInstruction(
767           MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
768     }
769   }
770 }
771 
772 void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
773   emitHwasanMemaccessSymbols(M);
774 
775   const Triple &TT = TM.getTargetTriple();
776   if (TT.isOSBinFormatMachO()) {
777     // Funny Darwin hack: This flag tells the linker that no global symbols
778     // contain code that falls through to other global symbols (e.g. the obvious
779     // implementation of multiple entry points).  If this doesn't occur, the
780     // linker can safely perform dead code stripping.  Since LLVM never
781     // generates code that does this, it is always safe to set.
782     OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
783   }
784 
785   // Emit stack and fault map information.
786   FM.serializeToFaultMapSection();
787 
788 }
789 
790 void AArch64AsmPrinter::emitLOHs() {
791   SmallVector<MCSymbol *, 3> MCArgs;
792 
793   for (const auto &D : AArch64FI->getLOHContainer()) {
794     for (const MachineInstr *MI : D.getArgs()) {
795       MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
796       assert(LabelIt != LOHInstToLabel.end() &&
797              "Label hasn't been inserted for LOH related instruction");
798       MCArgs.push_back(LabelIt->second);
799     }
800     OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
801     MCArgs.clear();
802   }
803 }
804 
805 void AArch64AsmPrinter::emitFunctionBodyEnd() {
806   if (!AArch64FI->getLOHRelated().empty())
807     emitLOHs();
808 }
809 
810 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
811 MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
812   // Darwin uses a linker-private symbol name for constant-pools (to
813   // avoid addends on the relocation?), ELF has no such concept and
814   // uses a normal private symbol.
815   if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
816     return OutContext.getOrCreateSymbol(
817         Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
818         Twine(getFunctionNumber()) + "_" + Twine(CPID));
819 
820   return AsmPrinter::GetCPISymbol(CPID);
821 }
822 
823 void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
824                                      raw_ostream &O) {
825   const MachineOperand &MO = MI->getOperand(OpNum);
826   switch (MO.getType()) {
827   default:
828     llvm_unreachable("<unknown operand type>");
829   case MachineOperand::MO_Register: {
830     Register Reg = MO.getReg();
831     assert(Reg.isPhysical());
832     assert(!MO.getSubReg() && "Subregs should be eliminated!");
833     O << AArch64InstPrinter::getRegisterName(Reg);
834     break;
835   }
836   case MachineOperand::MO_Immediate: {
837     O << MO.getImm();
838     break;
839   }
840   case MachineOperand::MO_GlobalAddress: {
841     PrintSymbolOperand(MO, O);
842     break;
843   }
844   case MachineOperand::MO_BlockAddress: {
845     MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
846     Sym->print(O, MAI);
847     break;
848   }
849   }
850 }
851 
852 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
853                                           raw_ostream &O) {
854   Register Reg = MO.getReg();
855   switch (Mode) {
856   default:
857     return true; // Unknown mode.
858   case 'w':
859     Reg = getWRegFromXReg(Reg);
860     break;
861   case 'x':
862     Reg = getXRegFromWReg(Reg);
863     break;
864   case 't':
865     Reg = getXRegFromXRegTuple(Reg);
866     break;
867   }
868 
869   O << AArch64InstPrinter::getRegisterName(Reg);
870   return false;
871 }
872 
873 // Prints the register in MO using class RC using the offset in the
874 // new register class. This should not be used for cross class
875 // printing.
876 bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
877                                            const TargetRegisterClass *RC,
878                                            unsigned AltName, raw_ostream &O) {
879   assert(MO.isReg() && "Should only get here with a register!");
880   const TargetRegisterInfo *RI = STI->getRegisterInfo();
881   Register Reg = MO.getReg();
882   unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
883   if (!RI->regsOverlap(RegToPrint, Reg))
884     return true;
885   O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
886   return false;
887 }
888 
889 bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
890                                         const char *ExtraCode, raw_ostream &O) {
891   const MachineOperand &MO = MI->getOperand(OpNum);
892 
893   // First try the generic code, which knows about modifiers like 'c' and 'n'.
894   if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
895     return false;
896 
897   // Does this asm operand have a single letter operand modifier?
898   if (ExtraCode && ExtraCode[0]) {
899     if (ExtraCode[1] != 0)
900       return true; // Unknown modifier.
901 
902     switch (ExtraCode[0]) {
903     default:
904       return true; // Unknown modifier.
905     case 'w':      // Print W register
906     case 'x':      // Print X register
907       if (MO.isReg())
908         return printAsmMRegister(MO, ExtraCode[0], O);
909       if (MO.isImm() && MO.getImm() == 0) {
910         unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
911         O << AArch64InstPrinter::getRegisterName(Reg);
912         return false;
913       }
914       printOperand(MI, OpNum, O);
915       return false;
916     case 'b': // Print B register.
917     case 'h': // Print H register.
918     case 's': // Print S register.
919     case 'd': // Print D register.
920     case 'q': // Print Q register.
921     case 'z': // Print Z register.
922       if (MO.isReg()) {
923         const TargetRegisterClass *RC;
924         switch (ExtraCode[0]) {
925         case 'b':
926           RC = &AArch64::FPR8RegClass;
927           break;
928         case 'h':
929           RC = &AArch64::FPR16RegClass;
930           break;
931         case 's':
932           RC = &AArch64::FPR32RegClass;
933           break;
934         case 'd':
935           RC = &AArch64::FPR64RegClass;
936           break;
937         case 'q':
938           RC = &AArch64::FPR128RegClass;
939           break;
940         case 'z':
941           RC = &AArch64::ZPRRegClass;
942           break;
943         default:
944           return true;
945         }
946         return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
947       }
948       printOperand(MI, OpNum, O);
949       return false;
950     }
951   }
952 
953   // According to ARM, we should emit x and v registers unless we have a
954   // modifier.
955   if (MO.isReg()) {
956     Register Reg = MO.getReg();
957 
958     // If this is a w or x register, print an x register.
959     if (AArch64::GPR32allRegClass.contains(Reg) ||
960         AArch64::GPR64allRegClass.contains(Reg))
961       return printAsmMRegister(MO, 'x', O);
962 
963     // If this is an x register tuple, print an x register.
964     if (AArch64::GPR64x8ClassRegClass.contains(Reg))
965       return printAsmMRegister(MO, 't', O);
966 
967     unsigned AltName = AArch64::NoRegAltName;
968     const TargetRegisterClass *RegClass;
969     if (AArch64::ZPRRegClass.contains(Reg)) {
970       RegClass = &AArch64::ZPRRegClass;
971     } else if (AArch64::PPRRegClass.contains(Reg)) {
972       RegClass = &AArch64::PPRRegClass;
973     } else {
974       RegClass = &AArch64::FPR128RegClass;
975       AltName = AArch64::vreg;
976     }
977 
978     // If this is a b, h, s, d, or q register, print it as a v register.
979     return printAsmRegInClass(MO, RegClass, AltName, O);
980   }
981 
982   printOperand(MI, OpNum, O);
983   return false;
984 }
985 
986 bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
987                                               unsigned OpNum,
988                                               const char *ExtraCode,
989                                               raw_ostream &O) {
990   if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
991     return true; // Unknown modifier.
992 
993   const MachineOperand &MO = MI->getOperand(OpNum);
994   assert(MO.isReg() && "unexpected inline asm memory operand");
995   O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
996   return false;
997 }
998 
999 void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1000                                                raw_ostream &OS) {
1001   unsigned NOps = MI->getNumOperands();
1002   assert(NOps == 4);
1003   OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1004   // cast away const; DIetc do not take const operands for some reason.
1005   OS << MI->getDebugVariable()->getName();
1006   OS << " <- ";
1007   // Frame address.  Currently handles register +- offset only.
1008   assert(MI->isIndirectDebugValue());
1009   OS << '[';
1010   for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1011                                          MI->debug_operands().end());
1012        I < E; ++I) {
1013     if (I != 0)
1014       OS << ", ";
1015     printOperand(MI, I, OS);
1016   }
1017   OS << ']';
1018   OS << "+";
1019   printOperand(MI, NOps - 2, OS);
1020 }
1021 
1022 void AArch64AsmPrinter::emitJumpTableInfo() {
1023   const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1024   if (!MJTI) return;
1025 
1026   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1027   if (JT.empty()) return;
1028 
1029   const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1030   MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
1031   OutStreamer->switchSection(ReadOnlySec);
1032 
1033   auto AFI = MF->getInfo<AArch64FunctionInfo>();
1034   for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
1035     const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1036 
1037     // If this jump table was deleted, ignore it.
1038     if (JTBBs.empty()) continue;
1039 
1040     unsigned Size = AFI->getJumpTableEntrySize(JTI);
1041     emitAlignment(Align(Size));
1042     OutStreamer->emitLabel(GetJTISymbol(JTI));
1043 
1044     const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1045     const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1046 
1047     for (auto *JTBB : JTBBs) {
1048       const MCExpr *Value =
1049           MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1050 
1051       // Each entry is:
1052       //     .byte/.hword (LBB - Lbase)>>2
1053       // or plain:
1054       //     .word LBB - Lbase
1055       Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1056       if (Size != 4)
1057         Value = MCBinaryExpr::createLShr(
1058             Value, MCConstantExpr::create(2, OutContext), OutContext);
1059 
1060       OutStreamer->emitValue(Value, Size);
1061     }
1062   }
1063 }
1064 
1065 void AArch64AsmPrinter::emitFunctionEntryLabel() {
1066   if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1067       MF->getFunction().getCallingConv() ==
1068           CallingConv::AArch64_SVE_VectorCall ||
1069       MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
1070     auto *TS =
1071         static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1072     TS->emitDirectiveVariantPCS(CurrentFnSym);
1073   }
1074 
1075   return AsmPrinter::emitFunctionEntryLabel();
1076 }
1077 
1078 /// Small jump tables contain an unsigned byte or half, representing the offset
1079 /// from the lowest-addressed possible destination to the desired basic
1080 /// block. Since all instructions are 4-byte aligned, this is further compressed
1081 /// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1082 /// materialize the correct destination we need:
1083 ///
1084 ///             adr xDest, .LBB0_0
1085 ///             ldrb wScratch, [xTable, xEntry]   (with "lsl #1" for ldrh).
1086 ///             add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1087 void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1088                                            const llvm::MachineInstr &MI) {
1089   Register DestReg = MI.getOperand(0).getReg();
1090   Register ScratchReg = MI.getOperand(1).getReg();
1091   Register ScratchRegW =
1092       STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1093   Register TableReg = MI.getOperand(2).getReg();
1094   Register EntryReg = MI.getOperand(3).getReg();
1095   int JTIdx = MI.getOperand(4).getIndex();
1096   int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1097 
1098   // This has to be first because the compression pass based its reachability
1099   // calculations on the start of the JumpTableDest instruction.
1100   auto Label =
1101       MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1102 
1103   // If we don't already have a symbol to use as the base, use the ADR
1104   // instruction itself.
1105   if (!Label) {
1106     Label = MF->getContext().createTempSymbol();
1107     AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1108     OutStreamer.emitLabel(Label);
1109   }
1110 
1111   auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1112   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1113                                   .addReg(DestReg)
1114                                   .addExpr(LabelExpr));
1115 
1116   // Load the number of instruction-steps to offset from the label.
1117   unsigned LdrOpcode;
1118   switch (Size) {
1119   case 1: LdrOpcode = AArch64::LDRBBroX; break;
1120   case 2: LdrOpcode = AArch64::LDRHHroX; break;
1121   case 4: LdrOpcode = AArch64::LDRSWroX; break;
1122   default:
1123     llvm_unreachable("Unknown jump table size");
1124   }
1125 
1126   EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1127                                   .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1128                                   .addReg(TableReg)
1129                                   .addReg(EntryReg)
1130                                   .addImm(0)
1131                                   .addImm(Size == 1 ? 0 : 1));
1132 
1133   // Add to the already materialized base label address, multiplying by 4 if
1134   // compressed.
1135   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1136                                   .addReg(DestReg)
1137                                   .addReg(DestReg)
1138                                   .addReg(ScratchReg)
1139                                   .addImm(Size == 4 ? 0 : 2));
1140 }
1141 
1142 void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1143                                   const llvm::MachineInstr &MI) {
1144   unsigned Opcode = MI.getOpcode();
1145   assert(STI->hasMOPS());
1146   assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1147 
1148   const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1149     if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1150       return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1151     if (Opcode == AArch64::MOPSMemoryMovePseudo)
1152       return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1153     if (Opcode == AArch64::MOPSMemorySetPseudo)
1154       return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1155     if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1156       return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1157     llvm_unreachable("Unhandled memory operation pseudo");
1158   }();
1159   const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1160                      Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1161 
1162   for (auto Op : Ops) {
1163     int i = 0;
1164     auto MCIB = MCInstBuilder(Op);
1165     // Destination registers
1166     MCIB.addReg(MI.getOperand(i++).getReg());
1167     MCIB.addReg(MI.getOperand(i++).getReg());
1168     if (!IsSet)
1169       MCIB.addReg(MI.getOperand(i++).getReg());
1170     // Input registers
1171     MCIB.addReg(MI.getOperand(i++).getReg());
1172     MCIB.addReg(MI.getOperand(i++).getReg());
1173     MCIB.addReg(MI.getOperand(i++).getReg());
1174 
1175     EmitToStreamer(OutStreamer, MCIB);
1176   }
1177 }
1178 
1179 void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1180                                       const MachineInstr &MI) {
1181   unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1182 
1183   auto &Ctx = OutStreamer.getContext();
1184   MCSymbol *MILabel = Ctx.createTempSymbol();
1185   OutStreamer.emitLabel(MILabel);
1186 
1187   SM.recordStackMap(*MILabel, MI);
1188   assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1189 
1190   // Scan ahead to trim the shadow.
1191   const MachineBasicBlock &MBB = *MI.getParent();
1192   MachineBasicBlock::const_iterator MII(MI);
1193   ++MII;
1194   while (NumNOPBytes > 0) {
1195     if (MII == MBB.end() || MII->isCall() ||
1196         MII->getOpcode() == AArch64::DBG_VALUE ||
1197         MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1198         MII->getOpcode() == TargetOpcode::STACKMAP)
1199       break;
1200     ++MII;
1201     NumNOPBytes -= 4;
1202   }
1203 
1204   // Emit nops.
1205   for (unsigned i = 0; i < NumNOPBytes; i += 4)
1206     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1207 }
1208 
1209 // Lower a patchpoint of the form:
1210 // [<def>], <id>, <numBytes>, <target>, <numArgs>
1211 void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1212                                         const MachineInstr &MI) {
1213   auto &Ctx = OutStreamer.getContext();
1214   MCSymbol *MILabel = Ctx.createTempSymbol();
1215   OutStreamer.emitLabel(MILabel);
1216   SM.recordPatchPoint(*MILabel, MI);
1217 
1218   PatchPointOpers Opers(&MI);
1219 
1220   int64_t CallTarget = Opers.getCallTarget().getImm();
1221   unsigned EncodedBytes = 0;
1222   if (CallTarget) {
1223     assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1224            "High 16 bits of call target should be zero.");
1225     Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1226     EncodedBytes = 16;
1227     // Materialize the jump address:
1228     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
1229                                     .addReg(ScratchReg)
1230                                     .addImm((CallTarget >> 32) & 0xFFFF)
1231                                     .addImm(32));
1232     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1233                                     .addReg(ScratchReg)
1234                                     .addReg(ScratchReg)
1235                                     .addImm((CallTarget >> 16) & 0xFFFF)
1236                                     .addImm(16));
1237     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1238                                     .addReg(ScratchReg)
1239                                     .addReg(ScratchReg)
1240                                     .addImm(CallTarget & 0xFFFF)
1241                                     .addImm(0));
1242     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1243   }
1244   // Emit padding.
1245   unsigned NumBytes = Opers.getNumPatchBytes();
1246   assert(NumBytes >= EncodedBytes &&
1247          "Patchpoint can't request size less than the length of a call.");
1248   assert((NumBytes - EncodedBytes) % 4 == 0 &&
1249          "Invalid number of NOP bytes requested!");
1250   for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1251     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1252 }
1253 
1254 void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1255                                         const MachineInstr &MI) {
1256   StatepointOpers SOpers(&MI);
1257   if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1258     assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1259     for (unsigned i = 0; i < PatchBytes; i += 4)
1260       EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1261   } else {
1262     // Lower call target and choose correct opcode
1263     const MachineOperand &CallTarget = SOpers.getCallTarget();
1264     MCOperand CallTargetMCOp;
1265     unsigned CallOpcode;
1266     switch (CallTarget.getType()) {
1267     case MachineOperand::MO_GlobalAddress:
1268     case MachineOperand::MO_ExternalSymbol:
1269       MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1270       CallOpcode = AArch64::BL;
1271       break;
1272     case MachineOperand::MO_Immediate:
1273       CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1274       CallOpcode = AArch64::BL;
1275       break;
1276     case MachineOperand::MO_Register:
1277       CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1278       CallOpcode = AArch64::BLR;
1279       break;
1280     default:
1281       llvm_unreachable("Unsupported operand type in statepoint call target");
1282       break;
1283     }
1284 
1285     EmitToStreamer(OutStreamer,
1286                    MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1287   }
1288 
1289   auto &Ctx = OutStreamer.getContext();
1290   MCSymbol *MILabel = Ctx.createTempSymbol();
1291   OutStreamer.emitLabel(MILabel);
1292   SM.recordStatepoint(*MILabel, MI);
1293 }
1294 
1295 void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1296   // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1297   //                  <opcode>, <operands>
1298 
1299   Register DefRegister = FaultingMI.getOperand(0).getReg();
1300   FaultMaps::FaultKind FK =
1301       static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1302   MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1303   unsigned Opcode = FaultingMI.getOperand(3).getImm();
1304   unsigned OperandsBeginIdx = 4;
1305 
1306   auto &Ctx = OutStreamer->getContext();
1307   MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1308   OutStreamer->emitLabel(FaultingLabel);
1309 
1310   assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1311   FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1312 
1313   MCInst MI;
1314   MI.setOpcode(Opcode);
1315 
1316   if (DefRegister != (Register)0)
1317     MI.addOperand(MCOperand::createReg(DefRegister));
1318 
1319   for (const MachineOperand &MO :
1320        llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1321     MCOperand Dest;
1322     lowerOperand(MO, Dest);
1323     MI.addOperand(Dest);
1324   }
1325 
1326   OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1327   OutStreamer->emitInstruction(MI, getSubtargetInfo());
1328 }
1329 
1330 void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1331   Register DestReg = MI.getOperand(0).getReg();
1332   if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1333       STI->isNeonAvailable()) {
1334     // Convert H/S register to corresponding D register
1335     if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1336       DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1337     else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1338       DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1339     else
1340       assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1341 
1342     MCInst MOVI;
1343     MOVI.setOpcode(AArch64::MOVID);
1344     MOVI.addOperand(MCOperand::createReg(DestReg));
1345     MOVI.addOperand(MCOperand::createImm(0));
1346     EmitToStreamer(*OutStreamer, MOVI);
1347   } else {
1348     MCInst FMov;
1349     switch (MI.getOpcode()) {
1350     default: llvm_unreachable("Unexpected opcode");
1351     case AArch64::FMOVH0:
1352       FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1353       if (!STI->hasFullFP16())
1354         DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1355       FMov.addOperand(MCOperand::createReg(DestReg));
1356       FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1357       break;
1358     case AArch64::FMOVS0:
1359       FMov.setOpcode(AArch64::FMOVWSr);
1360       FMov.addOperand(MCOperand::createReg(DestReg));
1361       FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1362       break;
1363     case AArch64::FMOVD0:
1364       FMov.setOpcode(AArch64::FMOVXDr);
1365       FMov.addOperand(MCOperand::createReg(DestReg));
1366       FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1367       break;
1368     }
1369     EmitToStreamer(*OutStreamer, FMov);
1370   }
1371 }
1372 
1373 // Simple pseudo-instructions have their lowering (with expansion to real
1374 // instructions) auto-generated.
1375 #include "AArch64GenMCPseudoLowering.inc"
1376 
1377 void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
1378   AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
1379 
1380   // Do any auto-generated pseudo lowerings.
1381   if (emitPseudoExpansionLowering(*OutStreamer, MI))
1382     return;
1383 
1384   if (MI->getOpcode() == AArch64::ADRP) {
1385     for (auto &Opd : MI->operands()) {
1386       if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
1387                                 "swift_async_extendedFramePointerFlags") {
1388         ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
1389       }
1390     }
1391   }
1392 
1393   if (AArch64FI->getLOHRelated().count(MI)) {
1394     // Generate a label for LOH related instruction
1395     MCSymbol *LOHLabel = createTempSymbol("loh");
1396     // Associate the instruction with the label
1397     LOHInstToLabel[MI] = LOHLabel;
1398     OutStreamer->emitLabel(LOHLabel);
1399   }
1400 
1401   AArch64TargetStreamer *TS =
1402     static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1403   // Do any manual lowerings.
1404   switch (MI->getOpcode()) {
1405   default:
1406     break;
1407   case AArch64::HINT: {
1408     // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
1409     // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
1410     // non-empty. If MI is the initial BTI, place the
1411     // __patchable_function_entries label after BTI.
1412     if (CurrentPatchableFunctionEntrySym &&
1413         CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
1414         MI == &MF->front().front()) {
1415       int64_t Imm = MI->getOperand(0).getImm();
1416       if ((Imm & 32) && (Imm & 6)) {
1417         MCInst Inst;
1418         MCInstLowering.Lower(MI, Inst);
1419         EmitToStreamer(*OutStreamer, Inst);
1420         CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
1421         OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
1422         return;
1423       }
1424     }
1425     break;
1426   }
1427     case AArch64::MOVMCSym: {
1428       Register DestReg = MI->getOperand(0).getReg();
1429       const MachineOperand &MO_Sym = MI->getOperand(1);
1430       MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
1431       MCOperand Hi_MCSym, Lo_MCSym;
1432 
1433       Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
1434       Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
1435 
1436       MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
1437       MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
1438 
1439       MCInst MovZ;
1440       MovZ.setOpcode(AArch64::MOVZXi);
1441       MovZ.addOperand(MCOperand::createReg(DestReg));
1442       MovZ.addOperand(Hi_MCSym);
1443       MovZ.addOperand(MCOperand::createImm(16));
1444       EmitToStreamer(*OutStreamer, MovZ);
1445 
1446       MCInst MovK;
1447       MovK.setOpcode(AArch64::MOVKXi);
1448       MovK.addOperand(MCOperand::createReg(DestReg));
1449       MovK.addOperand(MCOperand::createReg(DestReg));
1450       MovK.addOperand(Lo_MCSym);
1451       MovK.addOperand(MCOperand::createImm(0));
1452       EmitToStreamer(*OutStreamer, MovK);
1453       return;
1454   }
1455   case AArch64::MOVIv2d_ns:
1456     // If the target has <rdar://problem/16473581>, lower this
1457     // instruction to movi.16b instead.
1458     if (STI->hasZeroCycleZeroingFPWorkaround() &&
1459         MI->getOperand(1).getImm() == 0) {
1460       MCInst TmpInst;
1461       TmpInst.setOpcode(AArch64::MOVIv16b_ns);
1462       TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1463       TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
1464       EmitToStreamer(*OutStreamer, TmpInst);
1465       return;
1466     }
1467     break;
1468 
1469   case AArch64::DBG_VALUE:
1470   case AArch64::DBG_VALUE_LIST:
1471     if (isVerbose() && OutStreamer->hasRawTextSupport()) {
1472       SmallString<128> TmpStr;
1473       raw_svector_ostream OS(TmpStr);
1474       PrintDebugValueComment(MI, OS);
1475       OutStreamer->emitRawText(StringRef(OS.str()));
1476     }
1477     return;
1478 
1479   case AArch64::EMITBKEY: {
1480       ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1481       if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1482           ExceptionHandlingType != ExceptionHandling::ARM)
1483         return;
1484 
1485       if (getFunctionCFISectionType(*MF) == CFISection::None)
1486         return;
1487 
1488       OutStreamer->emitCFIBKeyFrame();
1489       return;
1490   }
1491 
1492   case AArch64::EMITMTETAGGED: {
1493     ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1494     if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1495         ExceptionHandlingType != ExceptionHandling::ARM)
1496       return;
1497 
1498     if (getFunctionCFISectionType(*MF) != CFISection::None)
1499       OutStreamer->emitCFIMTETaggedFrame();
1500     return;
1501   }
1502 
1503   // Tail calls use pseudo instructions so they have the proper code-gen
1504   // attributes (isCall, isReturn, etc.). We lower them to the real
1505   // instruction here.
1506   case AArch64::TCRETURNri:
1507   case AArch64::TCRETURNriBTI:
1508   case AArch64::TCRETURNriALL: {
1509     MCInst TmpInst;
1510     TmpInst.setOpcode(AArch64::BR);
1511     TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1512     EmitToStreamer(*OutStreamer, TmpInst);
1513     return;
1514   }
1515   case AArch64::TCRETURNdi: {
1516     MCOperand Dest;
1517     MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
1518     MCInst TmpInst;
1519     TmpInst.setOpcode(AArch64::B);
1520     TmpInst.addOperand(Dest);
1521     EmitToStreamer(*OutStreamer, TmpInst);
1522     return;
1523   }
1524   case AArch64::SpeculationBarrierISBDSBEndBB: {
1525     // Print DSB SYS + ISB
1526     MCInst TmpInstDSB;
1527     TmpInstDSB.setOpcode(AArch64::DSB);
1528     TmpInstDSB.addOperand(MCOperand::createImm(0xf));
1529     EmitToStreamer(*OutStreamer, TmpInstDSB);
1530     MCInst TmpInstISB;
1531     TmpInstISB.setOpcode(AArch64::ISB);
1532     TmpInstISB.addOperand(MCOperand::createImm(0xf));
1533     EmitToStreamer(*OutStreamer, TmpInstISB);
1534     return;
1535   }
1536   case AArch64::SpeculationBarrierSBEndBB: {
1537     // Print SB
1538     MCInst TmpInstSB;
1539     TmpInstSB.setOpcode(AArch64::SB);
1540     EmitToStreamer(*OutStreamer, TmpInstSB);
1541     return;
1542   }
1543   case AArch64::TLSDESC_CALLSEQ: {
1544     /// lower this to:
1545     ///    adrp  x0, :tlsdesc:var
1546     ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
1547     ///    add   x0, x0, #:tlsdesc_lo12:var
1548     ///    .tlsdesccall var
1549     ///    blr   x1
1550     ///    (TPIDR_EL0 offset now in x0)
1551     const MachineOperand &MO_Sym = MI->getOperand(0);
1552     MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
1553     MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
1554     MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
1555     MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
1556     MCInstLowering.lowerOperand(MO_Sym, Sym);
1557     MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
1558     MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
1559 
1560     MCInst Adrp;
1561     Adrp.setOpcode(AArch64::ADRP);
1562     Adrp.addOperand(MCOperand::createReg(AArch64::X0));
1563     Adrp.addOperand(SymTLSDesc);
1564     EmitToStreamer(*OutStreamer, Adrp);
1565 
1566     MCInst Ldr;
1567     if (STI->isTargetILP32()) {
1568       Ldr.setOpcode(AArch64::LDRWui);
1569       Ldr.addOperand(MCOperand::createReg(AArch64::W1));
1570     } else {
1571       Ldr.setOpcode(AArch64::LDRXui);
1572       Ldr.addOperand(MCOperand::createReg(AArch64::X1));
1573     }
1574     Ldr.addOperand(MCOperand::createReg(AArch64::X0));
1575     Ldr.addOperand(SymTLSDescLo12);
1576     Ldr.addOperand(MCOperand::createImm(0));
1577     EmitToStreamer(*OutStreamer, Ldr);
1578 
1579     MCInst Add;
1580     if (STI->isTargetILP32()) {
1581       Add.setOpcode(AArch64::ADDWri);
1582       Add.addOperand(MCOperand::createReg(AArch64::W0));
1583       Add.addOperand(MCOperand::createReg(AArch64::W0));
1584     } else {
1585       Add.setOpcode(AArch64::ADDXri);
1586       Add.addOperand(MCOperand::createReg(AArch64::X0));
1587       Add.addOperand(MCOperand::createReg(AArch64::X0));
1588     }
1589     Add.addOperand(SymTLSDescLo12);
1590     Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
1591     EmitToStreamer(*OutStreamer, Add);
1592 
1593     // Emit a relocation-annotation. This expands to no code, but requests
1594     // the following instruction gets an R_AARCH64_TLSDESC_CALL.
1595     MCInst TLSDescCall;
1596     TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
1597     TLSDescCall.addOperand(Sym);
1598     EmitToStreamer(*OutStreamer, TLSDescCall);
1599 
1600     MCInst Blr;
1601     Blr.setOpcode(AArch64::BLR);
1602     Blr.addOperand(MCOperand::createReg(AArch64::X1));
1603     EmitToStreamer(*OutStreamer, Blr);
1604 
1605     return;
1606   }
1607 
1608   case AArch64::JumpTableDest32:
1609   case AArch64::JumpTableDest16:
1610   case AArch64::JumpTableDest8:
1611     LowerJumpTableDest(*OutStreamer, *MI);
1612     return;
1613 
1614   case AArch64::FMOVH0:
1615   case AArch64::FMOVS0:
1616   case AArch64::FMOVD0:
1617     emitFMov0(*MI);
1618     return;
1619 
1620   case AArch64::MOPSMemoryCopyPseudo:
1621   case AArch64::MOPSMemoryMovePseudo:
1622   case AArch64::MOPSMemorySetPseudo:
1623   case AArch64::MOPSMemorySetTaggingPseudo:
1624     LowerMOPS(*OutStreamer, *MI);
1625     return;
1626 
1627   case TargetOpcode::STACKMAP:
1628     return LowerSTACKMAP(*OutStreamer, SM, *MI);
1629 
1630   case TargetOpcode::PATCHPOINT:
1631     return LowerPATCHPOINT(*OutStreamer, SM, *MI);
1632 
1633   case TargetOpcode::STATEPOINT:
1634     return LowerSTATEPOINT(*OutStreamer, SM, *MI);
1635 
1636   case TargetOpcode::FAULTING_OP:
1637     return LowerFAULTING_OP(*MI);
1638 
1639   case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1640     LowerPATCHABLE_FUNCTION_ENTER(*MI);
1641     return;
1642 
1643   case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1644     LowerPATCHABLE_FUNCTION_EXIT(*MI);
1645     return;
1646 
1647   case TargetOpcode::PATCHABLE_TAIL_CALL:
1648     LowerPATCHABLE_TAIL_CALL(*MI);
1649     return;
1650   case TargetOpcode::PATCHABLE_EVENT_CALL:
1651     return LowerPATCHABLE_EVENT_CALL(*MI, false);
1652   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
1653     return LowerPATCHABLE_EVENT_CALL(*MI, true);
1654 
1655   case AArch64::KCFI_CHECK:
1656     LowerKCFI_CHECK(*MI);
1657     return;
1658 
1659   case AArch64::HWASAN_CHECK_MEMACCESS:
1660   case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
1661     LowerHWASAN_CHECK_MEMACCESS(*MI);
1662     return;
1663 
1664   case AArch64::SEH_StackAlloc:
1665     TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
1666     return;
1667 
1668   case AArch64::SEH_SaveFPLR:
1669     TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
1670     return;
1671 
1672   case AArch64::SEH_SaveFPLR_X:
1673     assert(MI->getOperand(0).getImm() < 0 &&
1674            "Pre increment SEH opcode must have a negative offset");
1675     TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
1676     return;
1677 
1678   case AArch64::SEH_SaveReg:
1679     TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
1680                                MI->getOperand(1).getImm());
1681     return;
1682 
1683   case AArch64::SEH_SaveReg_X:
1684     assert(MI->getOperand(1).getImm() < 0 &&
1685            "Pre increment SEH opcode must have a negative offset");
1686     TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
1687                                 -MI->getOperand(1).getImm());
1688     return;
1689 
1690   case AArch64::SEH_SaveRegP:
1691     if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
1692         MI->getOperand(0).getImm() <= 28) {
1693       assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
1694              "Register paired with LR must be odd");
1695       TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
1696                                     MI->getOperand(2).getImm());
1697       return;
1698     }
1699     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1700             "Non-consecutive registers not allowed for save_regp");
1701     TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
1702                                 MI->getOperand(2).getImm());
1703     return;
1704 
1705   case AArch64::SEH_SaveRegP_X:
1706     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1707             "Non-consecutive registers not allowed for save_regp_x");
1708     assert(MI->getOperand(2).getImm() < 0 &&
1709            "Pre increment SEH opcode must have a negative offset");
1710     TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
1711                                  -MI->getOperand(2).getImm());
1712     return;
1713 
1714   case AArch64::SEH_SaveFReg:
1715     TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
1716                                 MI->getOperand(1).getImm());
1717     return;
1718 
1719   case AArch64::SEH_SaveFReg_X:
1720     assert(MI->getOperand(1).getImm() < 0 &&
1721            "Pre increment SEH opcode must have a negative offset");
1722     TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
1723                                  -MI->getOperand(1).getImm());
1724     return;
1725 
1726   case AArch64::SEH_SaveFRegP:
1727     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1728             "Non-consecutive registers not allowed for save_regp");
1729     TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
1730                                  MI->getOperand(2).getImm());
1731     return;
1732 
1733   case AArch64::SEH_SaveFRegP_X:
1734     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1735             "Non-consecutive registers not allowed for save_regp_x");
1736     assert(MI->getOperand(2).getImm() < 0 &&
1737            "Pre increment SEH opcode must have a negative offset");
1738     TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
1739                                   -MI->getOperand(2).getImm());
1740     return;
1741 
1742   case AArch64::SEH_SetFP:
1743     TS->emitARM64WinCFISetFP();
1744     return;
1745 
1746   case AArch64::SEH_AddFP:
1747     TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
1748     return;
1749 
1750   case AArch64::SEH_Nop:
1751     TS->emitARM64WinCFINop();
1752     return;
1753 
1754   case AArch64::SEH_PrologEnd:
1755     TS->emitARM64WinCFIPrologEnd();
1756     return;
1757 
1758   case AArch64::SEH_EpilogStart:
1759     TS->emitARM64WinCFIEpilogStart();
1760     return;
1761 
1762   case AArch64::SEH_EpilogEnd:
1763     TS->emitARM64WinCFIEpilogEnd();
1764     return;
1765 
1766   case AArch64::SEH_PACSignLR:
1767     TS->emitARM64WinCFIPACSignLR();
1768     return;
1769   }
1770 
1771   // Finally, do the automated lowerings for everything else.
1772   MCInst TmpInst;
1773   MCInstLowering.Lower(MI, TmpInst);
1774   EmitToStreamer(*OutStreamer, TmpInst);
1775 }
1776 
1777 // Force static initialization.
1778 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
1779   RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
1780   RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
1781   RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
1782   RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
1783   RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
1784 }
1785