//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;
  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

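// ADR and ADRP split their 21-bit immediate across the instruction word: the
// low two bits (immlo) live in bits 30:29 and the high 19 bits (immhi) in
// bits 23:5. AdrImmBits produces that split, ready to be OR'd into the
// opcode.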
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

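    // Each G<n> fragment selects one 16-bit chunk of the value: G0 keeps bits
    // [15:0], G1 shifts bits [31:16] down, G2 bits [47:32], G3 bits [63:48].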
    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) ==
               AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return None;

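  // Expand every R_AARCH64_* relocation name from the .def file into a .Case,
  // so .reloc directives can refer to relocation types by name; the
  // BFD_RELOC_* aliases are accepted for GNU as compatibility.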
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return None;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in a big-endian fixup, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
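  // Note: the sign of the original value is captured here because
  // adjustFixupValue may rewrite Value; the MOVZ/MOVN selection at the end of
  // this function depends on the pre-adjustment sign.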
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
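  // NOP encodes as 0xd503201f; instruction encodings are always emitted
  // little-endian, hence the byte order below.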
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 24 bits of the
  /// compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
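  /// For example, a 64-byte frame encodes as (64 / 16) << 12 == 0x4000.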
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    int CurOffset = 0;
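    // CurOffset tracks the CFA offset of the most recently processed register
    // save; each subsequent saved register must sit exactly 8 bytes below the
    // previous one.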
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.
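        // Each mask below covers the bits of every higher-numbered pair, so a
        // pair is only accepted while the encoding is still being built in
        // ascending register order.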

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}