1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/ARMAsmBackend.h"
10 #include "MCTargetDesc/ARMAddressingModes.h"
11 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
12 #include "MCTargetDesc/ARMAsmBackendELF.h"
13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14 #include "MCTargetDesc/ARMFixupKinds.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/BinaryFormat/ELF.h"
18 #include "llvm/BinaryFormat/MachO.h"
19 #include "llvm/MC/MCAsmBackend.h"
20 #include "llvm/MC/MCAssembler.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCDirectives.h"
23 #include "llvm/MC/MCELFObjectWriter.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCFixupKindInfo.h"
26 #include "llvm/MC/MCObjectWriter.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCSectionELF.h"
29 #include "llvm/MC/MCSectionMachO.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCValue.h"
32 #include "llvm/MC/MCAsmLayout.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/EndianStream.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/Format.h"
37 #include "llvm/Support/TargetParser.h"
38 #include "llvm/Support/raw_ostream.h"
39 using namespace llvm;
40 
41 namespace {
42 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
43 public:
44   ARMELFObjectWriter(uint8_t OSABI)
45       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
46                                 /*HasRelocationAddend*/ false) {}
47 };
48 } // end anonymous namespace
49 
50 std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
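  // The generic backend recognizes no target-specific fixup names; the ELF
  // subclass below provides the .reloc relocation-name mapping.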
51   return std::nullopt;
52 }
53 
54 std::optional<MCFixupKind>
55 ARMAsmBackendELF::getFixupKind(StringRef Name) const {
56   unsigned Type = llvm::StringSwitch<unsigned>(Name)
57 #define ELF_RELOC(X, Y) .Case(#X, Y)
58 #include "llvm/BinaryFormat/ELFRelocs/ARM.def"
59 #undef ELF_RELOC
60                       .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
61                       .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
62                       .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
63                       .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
64                       .Default(-1u);
65   if (Type == -1u)
66     return std::nullopt;
67   return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
68 }
69 
70 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
71   unsigned IsPCRelConstant =
72       MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
73   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
74       // This table *must* be in the order that the fixup_* kinds are defined in
75       // ARMFixupKinds.h.
76       //
77       // Name                      Offset (bits) Size (bits)     Flags
78       {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
79       {"fixup_t2_ldst_pcrel_12", 0, 32,
80        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
81       {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
82       {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
83       {"fixup_t2_pcrel_10", 0, 32,
84        MCFixupKindInfo::FKF_IsPCRel |
85            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
86       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
87       {"fixup_t2_pcrel_9", 0, 32,
88        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
89       {"fixup_arm_ldst_abs_12", 0, 32, 0},
90       {"fixup_thumb_adr_pcrel_10", 0, 8,
91        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
92       {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
93       {"fixup_t2_adr_pcrel_12", 0, 32,
94        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
95       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
96       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
97       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
98       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
99       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
100       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
101       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
102       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
103       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
104       {"fixup_arm_thumb_blx", 0, 32,
105        MCFixupKindInfo::FKF_IsPCRel |
106            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
107       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
108       {"fixup_arm_thumb_cp", 0, 8,
109        MCFixupKindInfo::FKF_IsPCRel |
110            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
111       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate scattered into two chunks, bits 0 - 11
      // and 16 - 19.
114       {"fixup_arm_movt_hi16", 0, 20, 0},
115       {"fixup_arm_movw_lo16", 0, 20, 0},
116       {"fixup_t2_movt_hi16", 0, 20, 0},
117       {"fixup_t2_movw_lo16", 0, 20, 0},
118       {"fixup_arm_mod_imm", 0, 12, 0},
119       {"fixup_t2_so_imm", 0, 26, 0},
120       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
121       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
122       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
123       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
124       {"fixup_bfcsel_else_target", 0, 32, 0},
125       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
126       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
127   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
128       // This table *must* be in the order that the fixup_* kinds are defined in
129       // ARMFixupKinds.h.
130       //
131       // Name                      Offset (bits) Size (bits)     Flags
132       {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
133       {"fixup_t2_ldst_pcrel_12", 0, 32,
134        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
135       {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
136       {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
137       {"fixup_t2_pcrel_10", 0, 32,
138        MCFixupKindInfo::FKF_IsPCRel |
139            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
140       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
141       {"fixup_t2_pcrel_9", 0, 32,
142        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
143       {"fixup_arm_ldst_abs_12", 0, 32, 0},
144       {"fixup_thumb_adr_pcrel_10", 8, 8,
145        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
146       {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
147       {"fixup_t2_adr_pcrel_12", 0, 32,
148        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
149       {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
150       {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
151       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
152       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
153       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
154       {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
155       {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
156       {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
157       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
158       {"fixup_arm_thumb_blx", 0, 32,
159        MCFixupKindInfo::FKF_IsPCRel |
160            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
161       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
162       {"fixup_arm_thumb_cp", 8, 8,
163        MCFixupKindInfo::FKF_IsPCRel |
164            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
165       {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate scattered into two chunks, bits 0 - 11
      // and 16 - 19.
168       {"fixup_arm_movt_hi16", 12, 20, 0},
169       {"fixup_arm_movw_lo16", 12, 20, 0},
170       {"fixup_t2_movt_hi16", 12, 20, 0},
171       {"fixup_t2_movw_lo16", 12, 20, 0},
172       {"fixup_arm_mod_imm", 20, 12, 0},
173       {"fixup_t2_so_imm", 26, 6, 0},
174       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
175       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
176       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
177       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
178       {"fixup_bfcsel_else_target", 0, 32, 0},
179       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
180       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
181 
182   // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
183   // any extra processing.
184   if (Kind >= FirstLiteralRelocationKind)
185     return MCAsmBackend::getFixupKindInfo(FK_NONE);
186 
187   if (Kind < FirstTargetFixupKind)
188     return MCAsmBackend::getFixupKindInfo(Kind);
189 
190   assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
191          "Invalid kind!");
192   return (Endian == support::little ? InfosLE
193                                     : InfosBE)[Kind - FirstTargetFixupKind];
194 }
195 
196 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
197   switch (Flag) {
198   default:
199     break;
200   case MCAF_Code16:
201     setIsThumb(true);
202     break;
203   case MCAF_Code32:
204     setIsThumb(false);
205     break;
206   }
207 }
208 
209 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
210                                          const MCSubtargetInfo &STI) const {
211   bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
212   bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
213 
214   switch (Op) {
215   default:
216     return Op;
217   case ARM::tBcc:
218     return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
219   case ARM::tLDRpci:
220     return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
221   case ARM::tADR:
222     return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
223   case ARM::tB:
224     return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
225   case ARM::tCBZ:
226     return ARM::tHINT;
227   case ARM::tCBNZ:
228     return ARM::tHINT;
229   }
230 }
231 
232 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
233                                       const MCSubtargetInfo &STI) const {
234   if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
235     return true;
236   return false;
237 }
238 
239 static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
240   int64_t Offset = int64_t(Value) - 4;
241   if (Offset < Min || Offset > Max)
242     return "out of range pc-relative fixup value";
243   return nullptr;
244 }
245 
246 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
247                                                     uint64_t Value) const {
248   switch (Fixup.getTargetKind()) {
249   case ARM::fixup_arm_thumb_br: {
250     // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
251     // low bit being an implied zero. There's an implied +4 offset for the
252     // branch, so we adjust the other way here to determine what's
253     // encodable.
254     //
    // Relax if the value does not fit in the signed 11-bit halfword offset,
    // i.e. the byte offset is outside [-2048, 2046].
256     int64_t Offset = int64_t(Value) - 4;
257     if (Offset > 2046 || Offset < -2048)
258       return "out of range pc-relative fixup value";
259     break;
260   }
261   case ARM::fixup_arm_thumb_bcc: {
262     // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
263     // low bit being an implied zero. There's an implied +4 offset for the
264     // branch, so we adjust the other way here to determine what's
265     // encodable.
266     //
267     // Relax if the value is too big for a (signed) i8.
268     int64_t Offset = int64_t(Value) - 4;
269     if (Offset > 254 || Offset < -256)
270       return "out of range pc-relative fixup value";
271     break;
272   }
273   case ARM::fixup_thumb_adr_pcrel_10:
274   case ARM::fixup_arm_thumb_cp: {
275     // If the immediate is negative, greater than 1020, or not a multiple
276     // of four, the wide version of the instruction must be used.
277     int64_t Offset = int64_t(Value) - 4;
278     if (Offset & 3)
279       return "misaligned pc-relative fixup value";
280     else if (Offset > 1020 || Offset < 0)
281       return "out of range pc-relative fixup value";
282     break;
283   }
284   case ARM::fixup_arm_thumb_cb: {
    // If a Thumb CBZ or CBNZ instruction targets the instruction immediately
    // after it, the offset is out of range for the instruction; it will be
    // changed to a NOP.
288     int64_t Offset = (Value & ~1);
289     if (Offset == 2)
290       return "will be converted to nop";
291     break;
292   }
293   case ARM::fixup_bf_branch:
294     return checkPCRelOffset(Value, 0, 30);
295   case ARM::fixup_bf_target:
296     return checkPCRelOffset(Value, -0x10000, +0xfffe);
297   case ARM::fixup_bfl_target:
298     return checkPCRelOffset(Value, -0x40000, +0x3fffe);
299   case ARM::fixup_bfc_target:
300     return checkPCRelOffset(Value, -0x1000, +0xffe);
301   case ARM::fixup_wls:
302     return checkPCRelOffset(Value, 0, +0xffe);
303   case ARM::fixup_le:
304     // The offset field in the LE and LETP instructions is an 11-bit
305     // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
306     // interpreted as a negative offset from the value read from pc,
307     // i.e. from instruction_address+4.
308     //
309     // So an LE instruction can in principle address the instruction
310     // immediately after itself, or (not very usefully) the address
311     // half way through the 4-byte LE.
312     return checkPCRelOffset(Value, -0xffe, 0);
313   case ARM::fixup_bfcsel_else_target: {
314     if (Value != 2 && Value != 4)
315       return "out of range label-relative fixup value";
316     break;
317   }
318 
319   default:
320     llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
321   }
322   return nullptr;
323 }
324 
325 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
326                                          const MCRelaxableFragment *DF,
327                                          const MCAsmLayout &Layout) const {
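  // reasonForFixupRelaxation() returns a diagnostic string only when the value
  // no longer fits the narrow encoding; the non-null pointer converts to true.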
328   return reasonForFixupRelaxation(Fixup, Value);
329 }
330 
331 void ARMAsmBackend::relaxInstruction(MCInst &Inst,
332                                      const MCSubtargetInfo &STI) const {
333   unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
334 
  // Report a fatal error if we get here with a bogus instruction.
336   if (RelaxedOp == Inst.getOpcode()) {
337     SmallString<256> Tmp;
338     raw_svector_ostream OS(Tmp);
339     Inst.dump_pretty(OS);
340     OS << "\n";
341     report_fatal_error("unexpected instruction to relax: " + OS.str());
342   }
343 
344   // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
345   // have to change the operands too.
346   if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
347       RelaxedOp == ARM::tHINT) {
348     MCInst Res;
349     Res.setOpcode(RelaxedOp);
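    // tHINT with a zero immediate is the architectural NOP; the remaining
    // operands are the always-true predicate (AL, encoded as 14) and a zero
    // predicate register.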
350     Res.addOperand(MCOperand::createImm(0));
351     Res.addOperand(MCOperand::createImm(14));
352     Res.addOperand(MCOperand::createReg(0));
353     Inst = std::move(Res);
354     return;
355   }
356 
357   // The rest of instructions we're relaxing have the same operands.
358   // We just need to update to the proper opcode.
359   Inst.setOpcode(RelaxedOp);
360 }
361 
362 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
363                                  const MCSubtargetInfo *STI) const {
364   const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
365   const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
366   const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
367   const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
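  // For example, a 5-byte pad in Thumb mode is emitted as two 16-bit NOPs plus
  // a single zero byte, and a 6-byte pad in ARM mode as one 32-bit NOP plus
  // two zero bytes (see the leftover-byte handling below).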
368   if (isThumb()) {
369     const uint16_t nopEncoding =
370         hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
371     uint64_t NumNops = Count / 2;
372     for (uint64_t i = 0; i != NumNops; ++i)
373       support::endian::write(OS, nopEncoding, Endian);
374     if (Count & 1)
375       OS << '\0';
376     return true;
377   }
378   // ARM mode
379   const uint32_t nopEncoding =
380       hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
381   uint64_t NumNops = Count / 4;
382   for (uint64_t i = 0; i != NumNops; ++i)
383     support::endian::write(OS, nopEncoding, Endian);
384   // FIXME: should this function return false when unable to write exactly
385   // 'Count' bytes with NOP encodings?
386   switch (Count % 4) {
387   default:
388     break; // No leftover bytes to write
389   case 1:
390     OS << '\0';
391     break;
392   case 2:
393     OS.write("\0\0", 2);
394     break;
395   case 3:
396     OS.write("\0\0\xa0", 3);
397     break;
398   }
399 
400   return true;
401 }
402 
403 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
404   if (IsLittleEndian) {
405     // Note that the halfwords are stored high first and low second in thumb;
406     // so we need to swap the fixup value here to map properly.
407     uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
408     Swapped |= (Value & 0x0000FFFF) << 16;
409     return Swapped;
410   } else
411     return Value;
412 }
413 
414 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
415                               bool IsLittleEndian) {
416   uint32_t Value;
417 
418   if (IsLittleEndian) {
419     Value = (SecondHalf & 0xFFFF) << 16;
420     Value |= (FirstHalf & 0xFFFF);
421   } else {
422     Value = (SecondHalf & 0xFFFF);
423     Value |= (FirstHalf & 0xFFFF) << 16;
424   }
425 
426   return Value;
427 }
428 
429 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
430                                          const MCFixup &Fixup,
431                                          const MCValue &Target, uint64_t Value,
432                                          bool IsResolved, MCContext &Ctx,
433                                          const MCSubtargetInfo* STI) const {
434   unsigned Kind = Fixup.getKind();
435 
436   // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
437   // and .word relocations they put the Thumb bit into the addend if possible.
438   // Other relocation types don't want this bit though (branches couldn't encode
439   // it if it *was* present, and no other relocations exist) and it can
440   // interfere with checking valid expressions.
441   if (const MCSymbolRefExpr *A = Target.getSymA()) {
442     if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
443         A->getSymbol().isExternal() &&
444         (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
445          Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
446          Kind == ARM::fixup_t2_movt_hi16))
447       Value |= 1;
448   }
449 
450   switch (Kind) {
451   default:
452     Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
453     return 0;
454   case FK_Data_1:
455   case FK_Data_2:
456   case FK_Data_4:
457     return Value;
458   case FK_SecRel_2:
459     return Value;
460   case FK_SecRel_4:
461     return Value;
462   case ARM::fixup_arm_movt_hi16:
463     assert(STI != nullptr);
464     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
465       Value >>= 16;
466     [[fallthrough]];
467   case ARM::fixup_arm_movw_lo16: {
468     unsigned Hi4 = (Value & 0xF000) >> 12;
469     unsigned Lo12 = Value & 0x0FFF;
470     // inst{19-16} = Hi4;
471     // inst{11-0} = Lo12;
472     Value = (Hi4 << 16) | (Lo12);
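    // For example, Value == 0x1234 gives Hi4 == 0x1 and Lo12 == 0x234, so the
    // returned fixup value is 0x10234.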
473     return Value;
474   }
475   case ARM::fixup_t2_movt_hi16:
476     assert(STI != nullptr);
477     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
478       Value >>= 16;
479     [[fallthrough]];
480   case ARM::fixup_t2_movw_lo16: {
481     unsigned Hi4 = (Value & 0xF000) >> 12;
482     unsigned i = (Value & 0x800) >> 11;
483     unsigned Mid3 = (Value & 0x700) >> 8;
484     unsigned Lo8 = Value & 0x0FF;
485     // inst{19-16} = Hi4;
486     // inst{26} = i;
487     // inst{14-12} = Mid3;
488     // inst{7-0} = Lo8;
489     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
490     return swapHalfWords(Value, Endian == support::little);
491   }
492   case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8; subtract 4 here and let the
    // fallthrough below subtract the remaining 4.
    Value -= 4;
495     [[fallthrough]];
496   case ARM::fixup_t2_ldst_pcrel_12:
497     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
498     Value -= 4;
499     [[fallthrough]];
500   case ARM::fixup_arm_ldst_abs_12: {
501     bool isAdd = true;
502     if ((int64_t)Value < 0) {
503       Value = -Value;
504       isAdd = false;
505     }
506     if (Value >= 4096) {
507       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
508       return 0;
509     }
510     Value |= isAdd << 23;
511 
512     // Same addressing mode as fixup_arm_pcrel_10,
513     // but with 16-bit halfwords swapped.
514     if (Kind == ARM::fixup_t2_ldst_pcrel_12)
515       return swapHalfWords(Value, Endian == support::little);
516 
517     return Value;
518   }
519   case ARM::fixup_arm_adr_pcrel_12: {
520     // ARM PC-relative values are offset by 8.
521     Value -= 8;
522     unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
523     if ((int64_t)Value < 0) {
524       Value = -Value;
525       opc = 2; // 0b0010
526     }
527     if (ARM_AM::getSOImmVal(Value) == -1) {
528       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
529       return 0;
530     }
531     // Encode the immediate and shift the opcode into place.
532     return ARM_AM::getSOImmVal(Value) | (opc << 21);
533   }
534 
535   case ARM::fixup_t2_adr_pcrel_12: {
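    // Thumb2 ADR: bias the PC-relative value by 4, select the add (opc == 0)
    // or subtract (opc == 5) form based on the sign, and scatter the 12-bit
    // magnitude into i:imm3:imm8 before the halfword swap.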
536     Value -= 4;
537     unsigned opc = 0;
538     if ((int64_t)Value < 0) {
539       Value = -Value;
540       opc = 5;
541     }
542 
543     uint32_t out = (opc << 21);
544     out |= (Value & 0x800) << 15;
545     out |= (Value & 0x700) << 4;
546     out |= (Value & 0x0FF);
547 
548     return swapHalfWords(out, Endian == support::little);
549   }
550 
551   case ARM::fixup_arm_condbranch:
552   case ARM::fixup_arm_uncondbranch:
553   case ARM::fixup_arm_uncondbl:
554   case ARM::fixup_arm_condbl:
555   case ARM::fixup_arm_blx:
556     // These values don't encode the low two bits since they're always zero.
557     // Offset by 8 just as above.
558     if (const MCSymbolRefExpr *SRE =
559             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
560       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
561         return 0;
562     return 0xffffff & ((Value - 8) >> 2);
563   case ARM::fixup_t2_uncondbranch: {
564     Value = Value - 4;
565     if (!isInt<25>(Value)) {
566       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
567       return 0;
568     }
569 
570     Value >>= 1; // Low bit is not encoded.
571 
572     uint32_t out = 0;
573     bool I = Value & 0x800000;
574     bool J1 = Value & 0x400000;
575     bool J2 = Value & 0x200000;
576     J1 ^= I;
577     J2 ^= I;
578 
579     out |= I << 26;                 // S bit
580     out |= !J1 << 13;               // J1 bit
581     out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
583     out |= (Value & 0x0007FF);      // imm11 field
584 
585     return swapHalfWords(out, Endian == support::little);
586   }
587   case ARM::fixup_t2_condbranch: {
588     Value = Value - 4;
589     if (!isInt<21>(Value)) {
590       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
591       return 0;
592     }
593 
594     Value >>= 1; // Low bit is not encoded.
595 
596     uint64_t out = 0;
597     out |= (Value & 0x80000) << 7; // S bit
598     out |= (Value & 0x40000) >> 7; // J2 bit
599     out |= (Value & 0x20000) >> 4; // J1 bit
600     out |= (Value & 0x1F800) << 5; // imm6 field
601     out |= (Value & 0x007FF);      // imm11 field
602 
603     return swapHalfWords(out, Endian == support::little);
604   }
605   case ARM::fixup_arm_thumb_bl: {
606     if (!isInt<25>(Value - 4) ||
607         (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
608          !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
609          !STI->getFeatureBits()[ARM::HasV6MOps] &&
610          !isInt<23>(Value - 4))) {
611       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
612       return 0;
613     }
614 
615     // The value doesn't encode the low bit (always zero) and is offset by
616     // four. The 32-bit immediate value is encoded as
617     //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
618     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
619     // The value is encoded into disjoint bit positions in the destination
620     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
621     // J = either J1 or J2 bit
622     //
623     //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
624     //
625     // Note that the halfwords are stored high first, low second; so we need
626     // to transpose the fixup value here to map properly.
627     uint32_t offset = (Value - 4) >> 1;
628     uint32_t signBit = (offset & 0x800000) >> 23;
629     uint32_t I1Bit = (offset & 0x400000) >> 22;
630     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
631     uint32_t I2Bit = (offset & 0x200000) >> 21;
632     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
633     uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
634     uint32_t imm11Bits = (offset & 0x000007FF);
635 
636     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
637     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
638                            (uint16_t)imm11Bits);
639     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
640   }
641   case ARM::fixup_arm_thumb_blx: {
642     // The value doesn't encode the low two bits (always zero) and is offset by
643     // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
644     //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
645     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
646     // The value is encoded into disjoint bit positions in the destination
647     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
648     // J = either J1 or J2 bit, 0 = zero.
649     //
650     //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
651     //
652     // Note that the halfwords are stored high first, low second; so we need
653     // to transpose the fixup value here to map properly.
654     if (Value % 4 != 0) {
655       Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
656       return 0;
657     }
658 
659     uint32_t offset = (Value - 4) >> 2;
660     if (const MCSymbolRefExpr *SRE =
661             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
662       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
663         offset = 0;
664     uint32_t signBit = (offset & 0x400000) >> 22;
665     uint32_t I1Bit = (offset & 0x200000) >> 21;
666     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
667     uint32_t I2Bit = (offset & 0x100000) >> 20;
668     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
669     uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
670     uint32_t imm10LBits = (offset & 0x3FF);
671 
672     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
673     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
674                            ((uint16_t)imm10LBits) << 1);
675     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
676   }
677   case ARM::fixup_thumb_adr_pcrel_10:
678   case ARM::fixup_arm_thumb_cp:
679     // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
680     // could have an error on our hands.
681     assert(STI != nullptr);
682     if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
683       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
684       if (FixupDiagnostic) {
685         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
686         return 0;
687       }
688     }
689     // Offset by 4, and don't encode the low two bits.
690     return ((Value - 4) >> 2) & 0xff;
691   case ARM::fixup_arm_thumb_cb: {
692     // CB instructions can only branch to offsets in [4, 126] in multiples of 2
693     // so ensure that the raw value LSB is zero and it lies in [2, 130].
694     // An offset of 2 will be relaxed to a NOP.
695     if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
696       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
697       return 0;
698     }
699     // Offset by 4 and don't encode the lower bit, which is always 0.
700     // FIXME: diagnose if no Thumb2
701     uint32_t Binary = (Value - 4) >> 1;
702     return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
703   }
704   case ARM::fixup_arm_thumb_br:
705     // Offset by 4 and don't encode the lower bit, which is always 0.
706     assert(STI != nullptr);
707     if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
708         !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
709       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
710       if (FixupDiagnostic) {
711         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
712         return 0;
713       }
714     }
715     return ((Value - 4) >> 1) & 0x7ff;
716   case ARM::fixup_arm_thumb_bcc:
717     // Offset by 4 and don't encode the lower bit, which is always 0.
718     assert(STI != nullptr);
719     if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
720       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
721       if (FixupDiagnostic) {
722         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
723         return 0;
724       }
725     }
726     return ((Value - 4) >> 1) & 0xff;
727   case ARM::fixup_arm_pcrel_10_unscaled: {
728     Value = Value - 8; // ARM fixups offset by an additional word and don't
729                        // need to adjust for the half-word ordering.
730     bool isAdd = true;
731     if ((int64_t)Value < 0) {
732       Value = -Value;
733       isAdd = false;
734     }
735     // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
736     if (Value >= 256) {
737       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
738       return 0;
739     }
740     Value = (Value & 0xf) | ((Value & 0xf0) << 4);
741     return Value | (isAdd << 23);
742   }
743   case ARM::fixup_arm_pcrel_10:
744     Value = Value - 4; // ARM fixups offset by an additional word and don't
745                        // need to adjust for the half-word ordering.
746     [[fallthrough]];
747   case ARM::fixup_t2_pcrel_10: {
748     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
749     Value = Value - 4;
750     bool isAdd = true;
751     if ((int64_t)Value < 0) {
752       Value = -Value;
753       isAdd = false;
754     }
755     // These values don't encode the low two bits since they're always zero.
756     Value >>= 2;
757     if (Value >= 256) {
758       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
759       return 0;
760     }
761     Value |= isAdd << 23;
762 
763     // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
764     // swapped.
765     if (Kind == ARM::fixup_t2_pcrel_10)
766       return swapHalfWords(Value, Endian == support::little);
767 
768     return Value;
769   }
770   case ARM::fixup_arm_pcrel_9:
771     Value = Value - 4; // ARM fixups offset by an additional word and don't
772                        // need to adjust for the half-word ordering.
773     [[fallthrough]];
774   case ARM::fixup_t2_pcrel_9: {
775     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
776     Value = Value - 4;
777     bool isAdd = true;
778     if ((int64_t)Value < 0) {
779       Value = -Value;
780       isAdd = false;
781     }
782     // These values don't encode the low bit since it's always zero.
783     if (Value & 1) {
784       Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
785       return 0;
786     }
787     Value >>= 1;
788     if (Value >= 256) {
789       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
790       return 0;
791     }
792     Value |= isAdd << 23;
793 
794     // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
795     // swapped.
796     if (Kind == ARM::fixup_t2_pcrel_9)
797       return swapHalfWords(Value, Endian == support::little);
798 
799     return Value;
800   }
801   case ARM::fixup_arm_mod_imm:
802     Value = ARM_AM::getSOImmVal(Value);
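    // getSOImmVal() returns the 12-bit modified-immediate encoding (4-bit
    // rotation field in bits 11-8, 8-bit value in bits 7-0), or -1 if Value is
    // not representable; a -1 sign-extends and fails the range check below.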
803     if (Value >> 12) {
804       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
805       return 0;
806     }
807     return Value;
808   case ARM::fixup_t2_so_imm: {
809     Value = ARM_AM::getT2SOImmVal(Value);
810     if ((int64_t)Value < 0) {
811       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
812       return 0;
813     }
814     // Value will contain a 12-bit value broken up into a 4-bit shift in bits
815     // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
816     // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
817     // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
818     // half-word.
819     uint64_t EncValue = 0;
820     EncValue |= (Value & 0x800) << 15;
821     EncValue |= (Value & 0x700) << 4;
822     EncValue |= (Value & 0xff);
823     return swapHalfWords(EncValue, Endian == support::little);
824   }
825   case ARM::fixup_bf_branch: {
826     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
827     if (FixupDiagnostic) {
828       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
829       return 0;
830     }
831     uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
832     return swapHalfWords(out, Endian == support::little);
833   }
834   case ARM::fixup_bf_target:
835   case ARM::fixup_bfl_target:
836   case ARM::fixup_bfc_target: {
837     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
838     if (FixupDiagnostic) {
839       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
840       return 0;
841     }
842     uint32_t out = 0;
843     uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
844                             Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
845     out |= (((Value - 4) >> 1) & 0x1) << 11;
846     out |= (((Value - 4) >> 1) & 0x7fe);
847     out |= (((Value - 4) >> 1) & HighBitMask) << 5;
848     return swapHalfWords(out, Endian == support::little);
849   }
850   case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch's target
    // and the instruction after that same branch.
854     Value = Target.getConstant();
855 
856     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
857     if (FixupDiagnostic) {
858       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
859       return 0;
860     }
861     uint32_t out = ((Value >> 2) & 1) << 17;
862     return swapHalfWords(out, Endian == support::little);
863   }
864   case ARM::fixup_wls:
865   case ARM::fixup_le: {
866     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
867     if (FixupDiagnostic) {
868       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
869       return 0;
870     }
871     uint64_t real_value = Value - 4;
872     uint32_t out = 0;
873     if (Kind == ARM::fixup_le)
874       real_value = -real_value;
875     out |= ((real_value >> 1) & 0x1) << 11;
876     out |= ((real_value >> 1) & 0x7fe);
877     return swapHalfWords(out, Endian == support::little);
878   }
879   }
880 }
881 
882 bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
883                                           const MCFixup &Fixup,
884                                           const MCValue &Target) {
885   const MCSymbolRefExpr *A = Target.getSymA();
886   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
887   const unsigned FixupKind = Fixup.getKind();
888   if (FixupKind >= FirstLiteralRelocationKind)
889     return true;
890   if (FixupKind == ARM::fixup_arm_thumb_bl) {
891     assert(Sym && "How did we resolve this?");
892 
893     // If the symbol is external the linker will handle it.
894     // FIXME: Should we handle it as an optimization?
895 
896     // If the symbol is out of range, produce a relocation and hope the
897     // linker can handle it. GNU AS produces an error in this case.
898     if (Sym->isExternal())
899       return true;
900   }
901   // Create relocations for unconditional branches to function symbols with
902   // different execution mode in ELF binaries.
903   if (Sym && Sym->isELF()) {
904     unsigned Type = cast<MCSymbolELF>(Sym)->getType();
905     if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
906       if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
907         return true;
908       if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
909                                     FixupKind == ARM::fixup_arm_thumb_bl ||
910                                     FixupKind == ARM::fixup_t2_condbranch ||
911                                     FixupKind == ARM::fixup_t2_uncondbranch))
912         return true;
913     }
914   }
915   // We must always generate a relocation for BL/BLX instructions if we have
916   // a symbol to reference, as the linker relies on knowing the destination
917   // symbol's thumb-ness to get interworking right.
918   if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
919             FixupKind == ARM::fixup_arm_blx ||
920             FixupKind == ARM::fixup_arm_uncondbl ||
921             FixupKind == ARM::fixup_arm_condbl))
922     return true;
923   return false;
924 }
925 
926 /// getFixupKindNumBytes - The number of bytes the fixup may change.
927 static unsigned getFixupKindNumBytes(unsigned Kind) {
928   switch (Kind) {
929   default:
930     llvm_unreachable("Unknown fixup kind!");
931 
932   case FK_Data_1:
933   case ARM::fixup_arm_thumb_bcc:
934   case ARM::fixup_arm_thumb_cp:
935   case ARM::fixup_thumb_adr_pcrel_10:
936     return 1;
937 
938   case FK_Data_2:
939   case ARM::fixup_arm_thumb_br:
940   case ARM::fixup_arm_thumb_cb:
941   case ARM::fixup_arm_mod_imm:
942     return 2;
943 
944   case ARM::fixup_arm_pcrel_10_unscaled:
945   case ARM::fixup_arm_ldst_pcrel_12:
946   case ARM::fixup_arm_pcrel_10:
947   case ARM::fixup_arm_pcrel_9:
948   case ARM::fixup_arm_ldst_abs_12:
949   case ARM::fixup_arm_adr_pcrel_12:
950   case ARM::fixup_arm_uncondbl:
951   case ARM::fixup_arm_condbl:
952   case ARM::fixup_arm_blx:
953   case ARM::fixup_arm_condbranch:
954   case ARM::fixup_arm_uncondbranch:
955     return 3;
956 
957   case FK_Data_4:
958   case ARM::fixup_t2_ldst_pcrel_12:
959   case ARM::fixup_t2_condbranch:
960   case ARM::fixup_t2_uncondbranch:
961   case ARM::fixup_t2_pcrel_10:
962   case ARM::fixup_t2_pcrel_9:
963   case ARM::fixup_t2_adr_pcrel_12:
964   case ARM::fixup_arm_thumb_bl:
965   case ARM::fixup_arm_thumb_blx:
966   case ARM::fixup_arm_movt_hi16:
967   case ARM::fixup_arm_movw_lo16:
968   case ARM::fixup_t2_movt_hi16:
969   case ARM::fixup_t2_movw_lo16:
970   case ARM::fixup_t2_so_imm:
971   case ARM::fixup_bf_branch:
972   case ARM::fixup_bf_target:
973   case ARM::fixup_bfl_target:
974   case ARM::fixup_bfc_target:
975   case ARM::fixup_bfcsel_else_target:
976   case ARM::fixup_wls:
977   case ARM::fixup_le:
978     return 4;
979 
980   case FK_SecRel_2:
981     return 2;
982   case FK_SecRel_4:
983     return 4;
984   }
985 }
986 
987 /// getFixupKindContainerSizeBytes - The number of bytes of the
988 /// container involved in big endian.
989 static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
990   switch (Kind) {
991   default:
992     llvm_unreachable("Unknown fixup kind!");
993 
994   case FK_Data_1:
995     return 1;
996   case FK_Data_2:
997     return 2;
998   case FK_Data_4:
999     return 4;
1000 
1001   case ARM::fixup_arm_thumb_bcc:
1002   case ARM::fixup_arm_thumb_cp:
1003   case ARM::fixup_thumb_adr_pcrel_10:
1004   case ARM::fixup_arm_thumb_br:
1005   case ARM::fixup_arm_thumb_cb:
1006     // Instruction size is 2 bytes.
1007     return 2;
1008 
1009   case ARM::fixup_arm_pcrel_10_unscaled:
1010   case ARM::fixup_arm_ldst_pcrel_12:
1011   case ARM::fixup_arm_pcrel_10:
1012   case ARM::fixup_arm_pcrel_9:
1013   case ARM::fixup_arm_adr_pcrel_12:
1014   case ARM::fixup_arm_uncondbl:
1015   case ARM::fixup_arm_condbl:
1016   case ARM::fixup_arm_blx:
1017   case ARM::fixup_arm_condbranch:
1018   case ARM::fixup_arm_uncondbranch:
1019   case ARM::fixup_t2_ldst_pcrel_12:
1020   case ARM::fixup_t2_condbranch:
1021   case ARM::fixup_t2_uncondbranch:
1022   case ARM::fixup_t2_pcrel_10:
1023   case ARM::fixup_t2_pcrel_9:
1024   case ARM::fixup_t2_adr_pcrel_12:
1025   case ARM::fixup_arm_thumb_bl:
1026   case ARM::fixup_arm_thumb_blx:
1027   case ARM::fixup_arm_movt_hi16:
1028   case ARM::fixup_arm_movw_lo16:
1029   case ARM::fixup_t2_movt_hi16:
1030   case ARM::fixup_t2_movw_lo16:
1031   case ARM::fixup_arm_mod_imm:
1032   case ARM::fixup_t2_so_imm:
1033   case ARM::fixup_bf_branch:
1034   case ARM::fixup_bf_target:
1035   case ARM::fixup_bfl_target:
1036   case ARM::fixup_bfc_target:
1037   case ARM::fixup_bfcsel_else_target:
1038   case ARM::fixup_wls:
1039   case ARM::fixup_le:
1040     // Instruction size is 4 bytes.
1041     return 4;
1042   }
1043 }
1044 
1045 void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
1046                                const MCValue &Target,
1047                                MutableArrayRef<char> Data, uint64_t Value,
1048                                bool IsResolved,
1049                                const MCSubtargetInfo* STI) const {
1050   unsigned Kind = Fixup.getKind();
1051   if (Kind >= FirstLiteralRelocationKind)
1052     return;
1053   MCContext &Ctx = Asm.getContext();
1054   Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
1055   if (!Value)
1056     return; // Doesn't change encoding.
1057   const unsigned NumBytes = getFixupKindNumBytes(Kind);
1058 
1059   unsigned Offset = Fixup.getOffset();
1060   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
1061 
1062   // Used to point to big endian bytes.
1063   unsigned FullSizeBytes;
1064   if (Endian == support::big) {
1065     FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
1066     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
1067     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
1068   }
1069 
1070   // For each byte of the fragment that the fixup touches, mask in the bits from
1071   // the fixup value. The Value has been "split up" into the appropriate
1072   // bitfields above.
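  // For example, a 3-byte fixup such as fixup_arm_condbranch in a 4-byte
  // big-endian container touches Data[Offset+3], Data[Offset+2] and
  // Data[Offset+1], leaving the most significant (opcode) byte untouched.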
1073   for (unsigned i = 0; i != NumBytes; ++i) {
1074     unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
1075     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
1076   }
1077 }
1078 
1079 namespace CU {
1080 
1081 /// Compact unwind encoding values.
1082 enum CompactUnwindEncodings {
1083   UNWIND_ARM_MODE_MASK                         = 0x0F000000,
1084   UNWIND_ARM_MODE_FRAME                        = 0x01000000,
1085   UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
1086   UNWIND_ARM_MODE_DWARF                        = 0x04000000,
1087 
1088   UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,
1089 
1090   UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
1091   UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
1092   UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,
1093 
1094   UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
1095   UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
1096   UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
1097   UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
1098   UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,
1099 
1100   UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,
1101 
1102   UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
1103 };
1104 
1105 } // end CU namespace
1106 
1107 /// Generate compact unwind encoding for the function based on the CFI
1108 /// instructions. If the CFI instructions describe a frame that cannot be
1109 /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
1111 uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
1112     ArrayRef<MCCFIInstruction> Instrs) const {
1113   DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
1114   // Only armv7k uses CFI based unwinding.
1115   if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
1116     return 0;
1117   // No .cfi directives means no frame.
1118   if (Instrs.empty())
1119     return 0;
1120   // Start off assuming CFA is at SP+0.
1121   unsigned CFARegister = ARM::SP;
1122   int CFARegisterOffset = 0;
1123   // Mark savable registers as initially unsaved
1124   DenseMap<unsigned, int> RegOffsets;
1125   int FloatRegCount = 0;
1126   // Process each .cfi directive and build up compact unwind info.
1127   for (const MCCFIInstruction &Inst : Instrs) {
1128     unsigned Reg;
1129     switch (Inst.getOperation()) {
1130     case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
1131       CFARegisterOffset = Inst.getOffset();
1132       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1133       break;
1134     case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
1135       CFARegisterOffset = Inst.getOffset();
1136       break;
1137     case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
1138       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1139       break;
1140     case MCCFIInstruction::OpOffset: // DW_CFA_offset
1141       Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1142       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
1143         RegOffsets[Reg] = Inst.getOffset();
1144       else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1145         RegOffsets[Reg] = Inst.getOffset();
1146         ++FloatRegCount;
1147       } else {
1148         DEBUG_WITH_TYPE("compact-unwind",
1149                         llvm::dbgs() << ".cfi_offset on unknown register="
1150                                      << Inst.getRegister() << "\n");
1151         return CU::UNWIND_ARM_MODE_DWARF;
1152       }
1153       break;
1154     case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1155       // Ignore
1156       break;
1157     default:
      // Directive not convertible to compact unwind, bail out.
1159       DEBUG_WITH_TYPE("compact-unwind",
1160                       llvm::dbgs()
1161                           << "CFI directive not compatiable with comact "
1162                              "unwind encoding, opcode=" << Inst.getOperation()
1163                           << "\n");
1164       return CU::UNWIND_ARM_MODE_DWARF;
1165       break;
1166     }
1167   }
1168 
1169   // If no frame set up, return no unwind info.
1170   if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1171     return 0;
1172 
1173   // Verify standard frame (lr/r7) was used.
1174   if (CFARegister != ARM::R7) {
1175     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1176                                                    << CFARegister
1177                                                    << " instead of r7\n");
1178     return CU::UNWIND_ARM_MODE_DWARF;
1179   }
1180   int StackAdjust = CFARegisterOffset - 8;
1181   if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1182     DEBUG_WITH_TYPE("compact-unwind",
1183                     llvm::dbgs()
1184                         << "LR not saved as standard frame, StackAdjust="
1185                         << StackAdjust
1186                         << ", CFARegisterOffset=" << CFARegisterOffset
1187                         << ", lr save at offset=" << RegOffsets[14] << "\n");
1188     return CU::UNWIND_ARM_MODE_DWARF;
1189   }
1190   if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1191     DEBUG_WITH_TYPE("compact-unwind",
1192                     llvm::dbgs() << "r7 not saved as standard frame\n");
1193     return CU::UNWIND_ARM_MODE_DWARF;
1194   }
1195   uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1196 
1197   // If var-args are used, there may be a stack adjust required.
1198   switch (StackAdjust) {
1199   case 0:
1200     break;
1201   case 4:
1202     CompactUnwindEncoding |= 0x00400000;
1203     break;
1204   case 8:
1205     CompactUnwindEncoding |= 0x00800000;
1206     break;
1207   case 12:
1208     CompactUnwindEncoding |= 0x00C00000;
1209     break;
1210   default:
1211     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1212                                           << ".cfi_def_cfa stack adjust ("
1213                                           << StackAdjust << ") out of range\n");
1214     return CU::UNWIND_ARM_MODE_DWARF;
1215   }
1216 
1217   // If r6 is saved, it must be right below r7.
1218   static struct {
1219     unsigned Reg;
1220     unsigned Encoding;
1221   } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1222                    {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1223                    {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1224                    {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1225                    {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1226                    {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1227                    {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1228                    {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
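  // For example, a prologue of "push {r4, r5, r7, lr}; add r7, sp, #8" would
  // typically record lr at -4, r7 at -8, r5 at -12 and r4 at -16; the loop
  // below then ORs in UNWIND_ARM_FRAME_FIRST_PUSH_R5 and _R4, producing
  // UNWIND_ARM_MODE_FRAME | 0x3 when there is no vararg stack adjust.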
1229 
1230   int CurOffset = -8 - StackAdjust;
1231   for (auto CSReg : GPRCSRegs) {
1232     auto Offset = RegOffsets.find(CSReg.Reg);
1233     if (Offset == RegOffsets.end())
1234       continue;
1235 
1236     int RegOffset = Offset->second;
1237     if (RegOffset != CurOffset - 4) {
1238       DEBUG_WITH_TYPE("compact-unwind",
1239                       llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1240                                    << RegOffset << " but only supported at "
1241                                    << CurOffset << "\n");
1242       return CU::UNWIND_ARM_MODE_DWARF;
1243     }
1244     CompactUnwindEncoding |= CSReg.Encoding;
1245     CurOffset -= 4;
1246   }
1247 
1248   // If no floats saved, we are done.
1249   if (FloatRegCount == 0)
1250     return CompactUnwindEncoding;
1251 
1252   // Switch mode to include D register saving.
1253   CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1254   CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1255 
1256   // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1257   // but needs coordination with the linker and libunwind.
1258   if (FloatRegCount > 4) {
1259     DEBUG_WITH_TYPE("compact-unwind",
1260                     llvm::dbgs() << "unsupported number of D registers saved ("
1261                                  << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
1263   }
1264 
1265   // Floating point registers must either be saved sequentially, or we defer to
1266   // DWARF. No gaps allowed here so check that each saved d-register is
1267   // precisely where it should be.
1268   static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1269   for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1270     auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1271     if (Offset == RegOffsets.end()) {
1272       DEBUG_WITH_TYPE("compact-unwind",
1273                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1274                                    << MRI.getName(FPRCSRegs[Idx])
1275                                    << " not saved\n");
1276       return CU::UNWIND_ARM_MODE_DWARF;
1277     } else if (Offset->second != CurOffset - 8) {
1278       DEBUG_WITH_TYPE("compact-unwind",
1279                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1280                                    << MRI.getName(FPRCSRegs[Idx])
1281                                    << " saved at " << Offset->second
1282                                    << ", expected at " << CurOffset - 8
1283                                    << "\n");
1284       return CU::UNWIND_ARM_MODE_DWARF;
1285     }
1286     CurOffset -= 8;
1287   }
1288 
1289   return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1290 }
1291 
1292 static MCAsmBackend *createARMAsmBackend(const Target &T,
1293                                          const MCSubtargetInfo &STI,
1294                                          const MCRegisterInfo &MRI,
1295                                          const MCTargetOptions &Options,
1296                                          support::endianness Endian) {
1297   const Triple &TheTriple = STI.getTargetTriple();
1298   switch (TheTriple.getObjectFormat()) {
1299   default:
1300     llvm_unreachable("unsupported object format");
1301   case Triple::MachO:
1302     return new ARMAsmBackendDarwin(T, STI, MRI);
1303   case Triple::COFF:
1304     assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1305     return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
1306   case Triple::ELF:
1307     assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1308     uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1309     return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
1310                                 Endian);
1311   }
1312 }
1313 
1314 MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1315                                           const MCSubtargetInfo &STI,
1316                                           const MCRegisterInfo &MRI,
1317                                           const MCTargetOptions &Options) {
1318   return createARMAsmBackend(T, STI, MRI, Options, support::little);
1319 }
1320 
1321 MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1322                                           const MCSubtargetInfo &STI,
1323                                           const MCRegisterInfo &MRI,
1324                                           const MCTargetOptions &Options) {
1325   return createARMAsmBackend(T, STI, MRI, Options, support::big);
1326 }
1327