1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/ARMAsmBackend.h"
10 #include "MCTargetDesc/ARMAddressingModes.h"
11 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
12 #include "MCTargetDesc/ARMAsmBackendELF.h"
13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14 #include "MCTargetDesc/ARMFixupKinds.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/BinaryFormat/ELF.h"
18 #include "llvm/BinaryFormat/MachO.h"
19 #include "llvm/MC/MCAsmBackend.h"
20 #include "llvm/MC/MCAssembler.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCDirectives.h"
23 #include "llvm/MC/MCELFObjectWriter.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCFixupKindInfo.h"
26 #include "llvm/MC/MCObjectWriter.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCSectionELF.h"
29 #include "llvm/MC/MCSectionMachO.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCValue.h"
32 #include "llvm/MC/MCAsmLayout.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/EndianStream.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/Format.h"
37 #include "llvm/Support/TargetParser.h"
38 #include "llvm/Support/raw_ostream.h"
39 using namespace llvm;
40 
41 namespace {
42 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
43 public:
  ARMELFObjectWriter(uint8_t OSABI)
45       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
46                                 /*HasRelocationAddend*/ false) {}
47 };
48 } // end anonymous namespace
49 
Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
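  // For illustration: mapping the name "R_ARM_NONE" to FK_NONE lets an
  // explicit `.reloc` directive name that relocation and have it emitted
  // without changing the encoded bytes on ELF targets.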
51   if (STI.getTargetTriple().isOSBinFormatELF() && Name == "R_ARM_NONE")
52     return FK_NONE;
53 
54   return MCAsmBackend::getFixupKind(Name);
55 }
56 
const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
58   unsigned IsPCRelConstant =
59       MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
60   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
61       // This table *must* be in the order that the fixup_* kinds are defined in
62       // ARMFixupKinds.h.
63       //
64       // Name                      Offset (bits) Size (bits)     Flags
65       {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
66       {"fixup_t2_ldst_pcrel_12", 0, 32,
67        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
68       {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
69       {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
70       {"fixup_t2_pcrel_10", 0, 32,
71        MCFixupKindInfo::FKF_IsPCRel |
72            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
73       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
74       {"fixup_t2_pcrel_9", 0, 32,
75        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
76       {"fixup_thumb_adr_pcrel_10", 0, 8,
77        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
78       {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
79       {"fixup_t2_adr_pcrel_12", 0, 32,
80        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
81       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
82       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
83       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
84       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
85       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
86       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
87       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
88       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
89       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
90       {"fixup_arm_thumb_blx", 0, 32,
91        MCFixupKindInfo::FKF_IsPCRel |
92            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
93       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
94       {"fixup_arm_thumb_cp", 0, 8,
95        MCFixupKindInfo::FKF_IsPCRel |
96            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
97       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks, bits
      // 0 - 11 and 16 - 19.
100       {"fixup_arm_movt_hi16", 0, 20, 0},
101       {"fixup_arm_movw_lo16", 0, 20, 0},
102       {"fixup_t2_movt_hi16", 0, 20, 0},
103       {"fixup_t2_movw_lo16", 0, 20, 0},
104       {"fixup_arm_mod_imm", 0, 12, 0},
105       {"fixup_t2_so_imm", 0, 26, 0},
106       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
107       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
108       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
109       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
110       {"fixup_bfcsel_else_target", 0, 32, 0},
111       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
112       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
113   };
114   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
115       // This table *must* be in the order that the fixup_* kinds are defined in
116       // ARMFixupKinds.h.
117       //
118       // Name                      Offset (bits) Size (bits)     Flags
119       {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
120       {"fixup_t2_ldst_pcrel_12", 0, 32,
121        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
122       {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
123       {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
124       {"fixup_t2_pcrel_10", 0, 32,
125        MCFixupKindInfo::FKF_IsPCRel |
126            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
127       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
128       {"fixup_t2_pcrel_9", 0, 32,
129        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
130       {"fixup_thumb_adr_pcrel_10", 8, 8,
131        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
132       {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
133       {"fixup_t2_adr_pcrel_12", 0, 32,
134        IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
135       {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
136       {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
137       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
138       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
139       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
140       {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
141       {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
142       {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
143       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
144       {"fixup_arm_thumb_blx", 0, 32,
145        MCFixupKindInfo::FKF_IsPCRel |
146            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
147       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
148       {"fixup_arm_thumb_cp", 8, 8,
149        MCFixupKindInfo::FKF_IsPCRel |
150            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
151       {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks, bits
      // 0 - 11 and 16 - 19.
154       {"fixup_arm_movt_hi16", 12, 20, 0},
155       {"fixup_arm_movw_lo16", 12, 20, 0},
156       {"fixup_t2_movt_hi16", 12, 20, 0},
157       {"fixup_t2_movw_lo16", 12, 20, 0},
158       {"fixup_arm_mod_imm", 20, 12, 0},
159       {"fixup_t2_so_imm", 26, 6, 0},
160       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
161       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
162       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
163       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
164       {"fixup_bfcsel_else_target", 0, 32, 0},
165       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
166       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
167   };
168 
169   if (Kind < FirstTargetFixupKind)
170     return MCAsmBackend::getFixupKindInfo(Kind);
171 
172   assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
173          "Invalid kind!");
174   return (Endian == support::little ? InfosLE
175                                     : InfosBE)[Kind - FirstTargetFixupKind];
176 }
177 
void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
179   switch (Flag) {
180   default:
181     break;
182   case MCAF_Code16:
183     setIsThumb(true);
184     break;
185   case MCAF_Code32:
186     setIsThumb(false);
187     break;
188   }
189 }
190 
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
192                                          const MCSubtargetInfo &STI) const {
193   bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
194   bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
195 
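  // For example, a Thumb-1 tBcc whose target falls outside the roughly
  // +/-256-byte conditional-branch range is relaxed to the wide t2Bcc when
  // Thumb2 is available, and tCBZ/tCBNZ are rewritten as a tHINT (NOP) when
  // their target turns out to be the immediately following instruction (see
  // reasonForFixupRelaxation below).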
196   switch (Op) {
197   default:
198     return Op;
199   case ARM::tBcc:
200     return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
201   case ARM::tLDRpci:
202     return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
203   case ARM::tADR:
204     return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
205   case ARM::tB:
206     return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
207   case ARM::tCBZ:
208     return ARM::tHINT;
209   case ARM::tCBNZ:
210     return ARM::tHINT;
211   }
212 }
213 
bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
215                                       const MCSubtargetInfo &STI) const {
216   if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
217     return true;
218   return false;
219 }
220 
static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
222   int64_t Offset = int64_t(Value) - 4;
223   if (Offset < Min || Offset > Max)
224     return "out of range pc-relative fixup value";
225   return nullptr;
226 }
227 
const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
229                                                     uint64_t Value) const {
230   switch (Fixup.getTargetKind()) {
231   case ARM::fixup_arm_thumb_br: {
232     // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
233     // low bit being an implied zero. There's an implied +4 offset for the
234     // branch, so we adjust the other way here to determine what's
235     // encodable.
236     //
    // Relax if the value does not fit in the signed 12-bit, halfword-aligned
    // displacement.
238     int64_t Offset = int64_t(Value) - 4;
239     if (Offset > 2046 || Offset < -2048)
240       return "out of range pc-relative fixup value";
241     break;
242   }
243   case ARM::fixup_arm_thumb_bcc: {
244     // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
245     // low bit being an implied zero. There's an implied +4 offset for the
246     // branch, so we adjust the other way here to determine what's
247     // encodable.
248     //
249     // Relax if the value is too big for a (signed) i8.
250     int64_t Offset = int64_t(Value) - 4;
251     if (Offset > 254 || Offset < -256)
252       return "out of range pc-relative fixup value";
253     break;
254   }
255   case ARM::fixup_thumb_adr_pcrel_10:
256   case ARM::fixup_arm_thumb_cp: {
257     // If the immediate is negative, greater than 1020, or not a multiple
258     // of four, the wide version of the instruction must be used.
259     int64_t Offset = int64_t(Value) - 4;
260     if (Offset & 3)
261       return "misaligned pc-relative fixup value";
262     else if (Offset > 1020 || Offset < 0)
263       return "out of range pc-relative fixup value";
264     break;
265   }
266   case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction;
    // it will be changed to a NOP.
270     int64_t Offset = (Value & ~1);
271     if (Offset == 2)
272       return "will be converted to nop";
273     break;
274   }
275   case ARM::fixup_bf_branch:
276     return checkPCRelOffset(Value, 0, 30);
277   case ARM::fixup_bf_target:
278     return checkPCRelOffset(Value, -0x10000, +0xfffe);
279   case ARM::fixup_bfl_target:
280     return checkPCRelOffset(Value, -0x40000, +0x3fffe);
281   case ARM::fixup_bfc_target:
282     return checkPCRelOffset(Value, -0x1000, +0xffe);
283   case ARM::fixup_wls:
284     return checkPCRelOffset(Value, 0, +0xffe);
285   case ARM::fixup_le:
286     // The offset field in the LE and LETP instructions is an 11-bit
287     // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
288     // interpreted as a negative offset from the value read from pc,
289     // i.e. from instruction_address+4.
290     //
291     // So an LE instruction can in principle address the instruction
292     // immediately after itself, or (not very usefully) the address
293     // half way through the 4-byte LE.
294     return checkPCRelOffset(Value, -0xffe, 0);
295   case ARM::fixup_bfcsel_else_target: {
296     if (Value != 2 && Value != 4)
297       return "out of range label-relative fixup value";
298     break;
299   }
300 
301   default:
302     llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
303   }
304   return nullptr;
305 }
306 
bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
308                                          const MCRelaxableFragment *DF,
309                                          const MCAsmLayout &Layout) const {
310   return reasonForFixupRelaxation(Fixup, Value);
311 }
312 
void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
314                                      const MCSubtargetInfo &STI,
315                                      MCInst &Res) const {
316   unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
317 
318   // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
319   if (RelaxedOp == Inst.getOpcode()) {
320     SmallString<256> Tmp;
321     raw_svector_ostream OS(Tmp);
322     Inst.dump_pretty(OS);
323     OS << "\n";
324     report_fatal_error("unexpected instruction to relax: " + OS.str());
325   }
326 
327   // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
328   // have to change the operands too.
329   if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
330       RelaxedOp == ARM::tHINT) {
331     Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));  // hint #0, i.e. a NOP
    Res.addOperand(MCOperand::createImm(14)); // predicate: AL (always)
    Res.addOperand(MCOperand::createReg(0));  // no predicate register
335     return;
336   }
337 
338   // The rest of instructions we're relaxing have the same operands.
339   // We just need to update to the proper opcode.
340   Res = Inst;
341   Res.setOpcode(RelaxedOp);
342 }
343 
bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
345   const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
346   const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
347   const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
348   const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
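  // Illustrative padding behaviour: Count == 10 in ARM mode emits two 4-byte
  // NOPs followed by two zero bytes; Count == 7 in Thumb mode emits three
  // 16-bit NOPs followed by a single zero byte.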
349   if (isThumb()) {
350     const uint16_t nopEncoding =
351         hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
352     uint64_t NumNops = Count / 2;
353     for (uint64_t i = 0; i != NumNops; ++i)
354       support::endian::write(OS, nopEncoding, Endian);
355     if (Count & 1)
356       OS << '\0';
357     return true;
358   }
359   // ARM mode
360   const uint32_t nopEncoding =
361       hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
362   uint64_t NumNops = Count / 4;
363   for (uint64_t i = 0; i != NumNops; ++i)
364     support::endian::write(OS, nopEncoding, Endian);
365   // FIXME: should this function return false when unable to write exactly
366   // 'Count' bytes with NOP encodings?
367   switch (Count % 4) {
368   default:
369     break; // No leftover bytes to write
370   case 1:
371     OS << '\0';
372     break;
373   case 2:
374     OS.write("\0\0", 2);
375     break;
376   case 3:
377     OS.write("\0\0\xa0", 3);
378     break;
379   }
380 
381   return true;
382 }
383 
static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
385   if (IsLittleEndian) {
386     // Note that the halfwords are stored high first and low second in thumb;
387     // so we need to swap the fixup value here to map properly.
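    // For example, swapHalfWords(0xAAAABBBB, true) returns 0xBBBBAAAA, so the
    // high halfword of the fixup value lands in the first (lower-addressed)
    // halfword of the 32-bit Thumb instruction once the bytes are written
    // little-endian.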
388     uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
389     Swapped |= (Value & 0x0000FFFF) << 16;
390     return Swapped;
391   } else
392     return Value;
393 }
394 
static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
396                               bool IsLittleEndian) {
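  // For example, on a little-endian target joinHalfWords(0xF000, 0xD000, true)
  // returns 0xD000F000: the first (lower-addressed) halfword occupies the low
  // 16 bits of the value that applyFixup then writes byte by byte.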
397   uint32_t Value;
398 
399   if (IsLittleEndian) {
400     Value = (SecondHalf & 0xFFFF) << 16;
401     Value |= (FirstHalf & 0xFFFF);
402   } else {
403     Value = (SecondHalf & 0xFFFF);
404     Value |= (FirstHalf & 0xFFFF) << 16;
405   }
406 
407   return Value;
408 }
409 
unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
411                                          const MCFixup &Fixup,
412                                          const MCValue &Target, uint64_t Value,
413                                          bool IsResolved, MCContext &Ctx,
414                                          const MCSubtargetInfo* STI) const {
415   unsigned Kind = Fixup.getKind();
416 
417   // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
418   // and .word relocations they put the Thumb bit into the addend if possible.
419   // Other relocation types don't want this bit though (branches couldn't encode
420   // it if it *was* present, and no other relocations exist) and it can
421   // interfere with checking valid expressions.
422   if (const MCSymbolRefExpr *A = Target.getSymA()) {
423     if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
424         A->getSymbol().isExternal() &&
425         (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
426          Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
427          Kind == ARM::fixup_t2_movt_hi16))
428       Value |= 1;
429   }
430 
431   switch (Kind) {
432   default:
433     Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
434     return 0;
435   case FK_NONE:
436   case FK_Data_1:
437   case FK_Data_2:
438   case FK_Data_4:
439     return Value;
440   case FK_SecRel_2:
441     return Value;
442   case FK_SecRel_4:
443     return Value;
444   case ARM::fixup_arm_movt_hi16:
445     assert(STI != nullptr);
446     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
447       Value >>= 16;
448     LLVM_FALLTHROUGH;
449   case ARM::fixup_arm_movw_lo16: {
450     unsigned Hi4 = (Value & 0xF000) >> 12;
451     unsigned Lo12 = Value & 0x0FFF;
452     // inst{19-16} = Hi4;
453     // inst{11-0} = Lo12;
454     Value = (Hi4 << 16) | (Lo12);
455     return Value;
456   }
457   case ARM::fixup_t2_movt_hi16:
458     assert(STI != nullptr);
459     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
460       Value >>= 16;
461     LLVM_FALLTHROUGH;
462   case ARM::fixup_t2_movw_lo16: {
463     unsigned Hi4 = (Value & 0xF000) >> 12;
464     unsigned i = (Value & 0x800) >> 11;
465     unsigned Mid3 = (Value & 0x700) >> 8;
466     unsigned Lo8 = Value & 0x0FF;
467     // inst{19-16} = Hi4;
468     // inst{26} = i;
469     // inst{14-12} = Mid3;
470     // inst{7-0} = Lo8;
471     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
472     return swapHalfWords(Value, Endian == support::little);
473   }
474   case ARM::fixup_arm_ldst_pcrel_12:
475     // ARM PC-relative values are offset by 8.
476     Value -= 4;
477     LLVM_FALLTHROUGH;
478   case ARM::fixup_t2_ldst_pcrel_12: {
479     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
480     Value -= 4;
481     bool isAdd = true;
482     if ((int64_t)Value < 0) {
483       Value = -Value;
484       isAdd = false;
485     }
486     if (Value >= 4096) {
487       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
488       return 0;
489     }
490     Value |= isAdd << 23;
491 
492     // Same addressing mode as fixup_arm_pcrel_10,
493     // but with 16-bit halfwords swapped.
494     if (Kind == ARM::fixup_t2_ldst_pcrel_12)
495       return swapHalfWords(Value, Endian == support::little);
496 
497     return Value;
498   }
499   case ARM::fixup_arm_adr_pcrel_12: {
500     // ARM PC-relative values are offset by 8.
501     Value -= 8;
502     unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
503     if ((int64_t)Value < 0) {
504       Value = -Value;
505       opc = 2; // 0b0010
506     }
507     if (ARM_AM::getSOImmVal(Value) == -1) {
508       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
509       return 0;
510     }
511     // Encode the immediate and shift the opcode into place.
512     return ARM_AM::getSOImmVal(Value) | (opc << 21);
513   }
514 
515   case ARM::fixup_t2_adr_pcrel_12: {
516     Value -= 4;
517     unsigned opc = 0;
518     if ((int64_t)Value < 0) {
519       Value = -Value;
520       opc = 5;
521     }
522 
523     uint32_t out = (opc << 21);
524     out |= (Value & 0x800) << 15;
525     out |= (Value & 0x700) << 4;
526     out |= (Value & 0x0FF);
527 
528     return swapHalfWords(out, Endian == support::little);
529   }
530 
531   case ARM::fixup_arm_condbranch:
532   case ARM::fixup_arm_uncondbranch:
533   case ARM::fixup_arm_uncondbl:
534   case ARM::fixup_arm_condbl:
535   case ARM::fixup_arm_blx:
536     // These values don't encode the low two bits since they're always zero.
537     // Offset by 8 just as above.
538     if (const MCSymbolRefExpr *SRE =
539             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
540       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
541         return 0;
542     return 0xffffff & ((Value - 8) >> 2);
543   case ARM::fixup_t2_uncondbranch: {
544     Value = Value - 4;
545     if (!isInt<25>(Value)) {
546       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
547       return 0;
548     }
549 
550     Value >>= 1; // Low bit is not encoded.
551 
552     uint32_t out = 0;
553     bool I = Value & 0x800000;
554     bool J1 = Value & 0x400000;
555     bool J2 = Value & 0x200000;
556     J1 ^= I;
557     J2 ^= I;
558 
559     out |= I << 26;                 // S bit
560     out |= !J1 << 13;               // J1 bit
561     out |= !J2 << 11;               // J2 bit
562     out |= (Value & 0x1FF800) << 5; // imm6 field
563     out |= (Value & 0x0007FF);      // imm11 field
564 
565     return swapHalfWords(out, Endian == support::little);
566   }
567   case ARM::fixup_t2_condbranch: {
568     Value = Value - 4;
569     if (!isInt<21>(Value)) {
570       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
571       return 0;
572     }
573 
574     Value >>= 1; // Low bit is not encoded.
575 
576     uint64_t out = 0;
577     out |= (Value & 0x80000) << 7; // S bit
578     out |= (Value & 0x40000) >> 7; // J2 bit
579     out |= (Value & 0x20000) >> 4; // J1 bit
580     out |= (Value & 0x1F800) << 5; // imm6 field
581     out |= (Value & 0x007FF);      // imm11 field
582 
583     return swapHalfWords(out, Endian == support::little);
584   }
585   case ARM::fixup_arm_thumb_bl: {
586     if (!isInt<25>(Value - 4) ||
587         (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
588          !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
589          !STI->getFeatureBits()[ARM::HasV6MOps] &&
590          !isInt<23>(Value - 4))) {
591       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
592       return 0;
593     }
594 
595     // The value doesn't encode the low bit (always zero) and is offset by
596     // four. The 32-bit immediate value is encoded as
597     //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
598     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
599     // The value is encoded into disjoint bit positions in the destination
600     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
601     // J = either J1 or J2 bit
602     //
603     //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
604     //
605     // Note that the halfwords are stored high first, low second; so we need
606     // to transpose the fixup value here to map properly.
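    //
    // Worked example (illustrative, little-endian): a fixup Value of 0x1000
    // gives offset 0x7FE, so S = 0, I1 = I2 = 0 and hence J1 = J2 = 1,
    // imm10 = 0, imm11 = 0x7FE; the returned contribution is 0x2FFE0000,
    // which applyFixup ORs into the BL encoding.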
607     uint32_t offset = (Value - 4) >> 1;
608     uint32_t signBit = (offset & 0x800000) >> 23;
609     uint32_t I1Bit = (offset & 0x400000) >> 22;
610     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
611     uint32_t I2Bit = (offset & 0x200000) >> 21;
612     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
613     uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
614     uint32_t imm11Bits = (offset & 0x000007FF);
615 
616     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
617     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
618                            (uint16_t)imm11Bits);
619     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
620   }
621   case ARM::fixup_arm_thumb_blx: {
622     // The value doesn't encode the low two bits (always zero) and is offset by
623     // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
624     //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
625     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
626     // The value is encoded into disjoint bit positions in the destination
627     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
628     // J = either J1 or J2 bit, 0 = zero.
629     //
630     //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
631     //
632     // Note that the halfwords are stored high first, low second; so we need
633     // to transpose the fixup value here to map properly.
634     if (Value % 4 != 0) {
635       Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
636       return 0;
637     }
638 
639     uint32_t offset = (Value - 4) >> 2;
640     if (const MCSymbolRefExpr *SRE =
641             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
642       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
643         offset = 0;
644     uint32_t signBit = (offset & 0x400000) >> 22;
645     uint32_t I1Bit = (offset & 0x200000) >> 21;
646     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
647     uint32_t I2Bit = (offset & 0x100000) >> 20;
648     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
649     uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
650     uint32_t imm10LBits = (offset & 0x3FF);
651 
652     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
653     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
654                            ((uint16_t)imm10LBits) << 1);
655     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
656   }
657   case ARM::fixup_thumb_adr_pcrel_10:
658   case ARM::fixup_arm_thumb_cp:
659     // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
660     // could have an error on our hands.
661     assert(STI != nullptr);
662     if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
663       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
664       if (FixupDiagnostic) {
665         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
666         return 0;
667       }
668     }
669     // Offset by 4, and don't encode the low two bits.
670     return ((Value - 4) >> 2) & 0xff;
671   case ARM::fixup_arm_thumb_cb: {
672     // CB instructions can only branch to offsets in [4, 126] in multiples of 2
673     // so ensure that the raw value LSB is zero and it lies in [2, 130].
674     // An offset of 2 will be relaxed to a NOP.
675     if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
676       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
677       return 0;
678     }
679     // Offset by 4 and don't encode the lower bit, which is always 0.
680     // FIXME: diagnose if no Thumb2
681     uint32_t Binary = (Value - 4) >> 1;
682     return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
683   }
684   case ARM::fixup_arm_thumb_br:
685     // Offset by 4 and don't encode the lower bit, which is always 0.
686     assert(STI != nullptr);
687     if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
688         !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
689       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
690       if (FixupDiagnostic) {
691         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
692         return 0;
693       }
694     }
695     return ((Value - 4) >> 1) & 0x7ff;
696   case ARM::fixup_arm_thumb_bcc:
697     // Offset by 4 and don't encode the lower bit, which is always 0.
698     assert(STI != nullptr);
699     if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
700       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
701       if (FixupDiagnostic) {
702         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
703         return 0;
704       }
705     }
706     return ((Value - 4) >> 1) & 0xff;
707   case ARM::fixup_arm_pcrel_10_unscaled: {
708     Value = Value - 8; // ARM fixups offset by an additional word and don't
709                        // need to adjust for the half-word ordering.
710     bool isAdd = true;
711     if ((int64_t)Value < 0) {
712       Value = -Value;
713       isAdd = false;
714     }
715     // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
716     if (Value >= 256) {
717       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
718       return 0;
719     }
720     Value = (Value & 0xf) | ((Value & 0xf0) << 4);
721     return Value | (isAdd << 23);
722   }
723   case ARM::fixup_arm_pcrel_10:
724     Value = Value - 4; // ARM fixups offset by an additional word and don't
725                        // need to adjust for the half-word ordering.
726     LLVM_FALLTHROUGH;
727   case ARM::fixup_t2_pcrel_10: {
728     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
729     Value = Value - 4;
730     bool isAdd = true;
731     if ((int64_t)Value < 0) {
732       Value = -Value;
733       isAdd = false;
734     }
735     // These values don't encode the low two bits since they're always zero.
736     Value >>= 2;
737     if (Value >= 256) {
738       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
739       return 0;
740     }
741     Value |= isAdd << 23;
742 
743     // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
744     // swapped.
745     if (Kind == ARM::fixup_t2_pcrel_10)
746       return swapHalfWords(Value, Endian == support::little);
747 
748     return Value;
749   }
750   case ARM::fixup_arm_pcrel_9:
751     Value = Value - 4; // ARM fixups offset by an additional word and don't
752                        // need to adjust for the half-word ordering.
753     LLVM_FALLTHROUGH;
754   case ARM::fixup_t2_pcrel_9: {
755     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
756     Value = Value - 4;
757     bool isAdd = true;
758     if ((int64_t)Value < 0) {
759       Value = -Value;
760       isAdd = false;
761     }
762     // These values don't encode the low bit since it's always zero.
763     if (Value & 1) {
764       Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
765       return 0;
766     }
767     Value >>= 1;
768     if (Value >= 256) {
769       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
770       return 0;
771     }
772     Value |= isAdd << 23;
773 
774     // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
775     // swapped.
776     if (Kind == ARM::fixup_t2_pcrel_9)
777       return swapHalfWords(Value, Endian == support::little);
778 
779     return Value;
780   }
781   case ARM::fixup_arm_mod_imm:
782     Value = ARM_AM::getSOImmVal(Value);
783     if (Value >> 12) {
784       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
785       return 0;
786     }
787     return Value;
788   case ARM::fixup_t2_so_imm: {
789     Value = ARM_AM::getT2SOImmVal(Value);
790     if ((int64_t)Value < 0) {
791       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
792       return 0;
793     }
794     // Value will contain a 12-bit value broken up into a 4-bit shift in bits
795     // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
796     // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
797     // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
798     // half-word.
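    // For example, an encoded Value of 0xABC splits into i = 1, imm3 = 0b010,
    // imm8 = 0xBC, giving EncValue 0x040020BC before the halfword swap
    // (0x20BC0400 after it on a little-endian target).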
799     uint64_t EncValue = 0;
800     EncValue |= (Value & 0x800) << 15;
801     EncValue |= (Value & 0x700) << 4;
802     EncValue |= (Value & 0xff);
803     return swapHalfWords(EncValue, Endian == support::little);
804   }
805   case ARM::fixup_bf_branch: {
806     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
807     if (FixupDiagnostic) {
808       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
809       return 0;
810     }
811     uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
812     return swapHalfWords(out, Endian == support::little);
813   }
814   case ARM::fixup_bf_target:
815   case ARM::fixup_bfl_target:
816   case ARM::fixup_bfc_target: {
817     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
818     if (FixupDiagnostic) {
819       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
820       return 0;
821     }
822     uint32_t out = 0;
823     uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
824                             Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
825     out |= (((Value - 4) >> 1) & 0x1) << 11;
826     out |= (((Value - 4) >> 1) & 0x7fe);
827     out |= (((Value - 4) >> 1) & HighBitMask) << 5;
828     return swapHalfWords(out, Endian == support::little);
829   }
830   case ARM::fixup_bfcsel_else_target: {
831     // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch's target
833     // and the instruction after that same branch.
834     Value = Target.getConstant();
835 
836     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
837     if (FixupDiagnostic) {
838       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
839       return 0;
840     }
841     uint32_t out = ((Value >> 2) & 1) << 17;
842     return swapHalfWords(out, Endian == support::little);
843   }
844   case ARM::fixup_wls:
845   case ARM::fixup_le: {
846     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
847     if (FixupDiagnostic) {
848       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
849       return 0;
850     }
851     uint64_t real_value = Value - 4;
852     uint32_t out = 0;
853     if (Kind == ARM::fixup_le)
854       real_value = -real_value;
855     out |= ((real_value >> 1) & 0x1) << 11;
856     out |= ((real_value >> 1) & 0x7fe);
857     return swapHalfWords(out, Endian == support::little);
858   }
859   }
860 }
861 
bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
863                                           const MCFixup &Fixup,
864                                           const MCValue &Target) {
865   const MCSymbolRefExpr *A = Target.getSymA();
866   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
867   const unsigned FixupKind = Fixup.getKind();
868   if (FixupKind == FK_NONE)
869     return true;
870   if (FixupKind == ARM::fixup_arm_thumb_bl) {
871     assert(Sym && "How did we resolve this?");
872 
873     // If the symbol is external the linker will handle it.
874     // FIXME: Should we handle it as an optimization?
875 
876     // If the symbol is out of range, produce a relocation and hope the
877     // linker can handle it. GNU AS produces an error in this case.
878     if (Sym->isExternal())
879       return true;
880   }
881   // Create relocations for unconditional branches to function symbols with
882   // different execution mode in ELF binaries.
883   if (Sym && Sym->isELF()) {
884     unsigned Type = cast<MCSymbolELF>(Sym)->getType();
885     if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
886       if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
887         return true;
888       if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
889                                     FixupKind == ARM::fixup_arm_thumb_bl ||
890                                     FixupKind == ARM::fixup_t2_condbranch ||
891                                     FixupKind == ARM::fixup_t2_uncondbranch))
892         return true;
893     }
894   }
895   // We must always generate a relocation for BL/BLX instructions if we have
896   // a symbol to reference, as the linker relies on knowing the destination
897   // symbol's thumb-ness to get interworking right.
898   if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
899             FixupKind == ARM::fixup_arm_blx ||
900             FixupKind == ARM::fixup_arm_uncondbl ||
901             FixupKind == ARM::fixup_arm_condbl))
902     return true;
903   return false;
904 }
905 
906 /// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
908   switch (Kind) {
909   default:
910     llvm_unreachable("Unknown fixup kind!");
911 
912   case FK_NONE:
913     return 0;
914 
915   case FK_Data_1:
916   case ARM::fixup_arm_thumb_bcc:
917   case ARM::fixup_arm_thumb_cp:
918   case ARM::fixup_thumb_adr_pcrel_10:
919     return 1;
920 
921   case FK_Data_2:
922   case ARM::fixup_arm_thumb_br:
923   case ARM::fixup_arm_thumb_cb:
924   case ARM::fixup_arm_mod_imm:
925     return 2;
926 
927   case ARM::fixup_arm_pcrel_10_unscaled:
928   case ARM::fixup_arm_ldst_pcrel_12:
929   case ARM::fixup_arm_pcrel_10:
930   case ARM::fixup_arm_pcrel_9:
931   case ARM::fixup_arm_adr_pcrel_12:
932   case ARM::fixup_arm_uncondbl:
933   case ARM::fixup_arm_condbl:
934   case ARM::fixup_arm_blx:
935   case ARM::fixup_arm_condbranch:
936   case ARM::fixup_arm_uncondbranch:
937     return 3;
938 
939   case FK_Data_4:
940   case ARM::fixup_t2_ldst_pcrel_12:
941   case ARM::fixup_t2_condbranch:
942   case ARM::fixup_t2_uncondbranch:
943   case ARM::fixup_t2_pcrel_10:
944   case ARM::fixup_t2_pcrel_9:
945   case ARM::fixup_t2_adr_pcrel_12:
946   case ARM::fixup_arm_thumb_bl:
947   case ARM::fixup_arm_thumb_blx:
948   case ARM::fixup_arm_movt_hi16:
949   case ARM::fixup_arm_movw_lo16:
950   case ARM::fixup_t2_movt_hi16:
951   case ARM::fixup_t2_movw_lo16:
952   case ARM::fixup_t2_so_imm:
953   case ARM::fixup_bf_branch:
954   case ARM::fixup_bf_target:
955   case ARM::fixup_bfl_target:
956   case ARM::fixup_bfc_target:
957   case ARM::fixup_bfcsel_else_target:
958   case ARM::fixup_wls:
959   case ARM::fixup_le:
960     return 4;
961 
962   case FK_SecRel_2:
963     return 2;
964   case FK_SecRel_4:
965     return 4;
966   }
967 }
968 
969 /// getFixupKindContainerSizeBytes - The number of bytes of the
970 /// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
972   switch (Kind) {
973   default:
974     llvm_unreachable("Unknown fixup kind!");
975 
976   case FK_NONE:
977     return 0;
978 
979   case FK_Data_1:
980     return 1;
981   case FK_Data_2:
982     return 2;
983   case FK_Data_4:
984     return 4;
985 
986   case ARM::fixup_arm_thumb_bcc:
987   case ARM::fixup_arm_thumb_cp:
988   case ARM::fixup_thumb_adr_pcrel_10:
989   case ARM::fixup_arm_thumb_br:
990   case ARM::fixup_arm_thumb_cb:
991     // Instruction size is 2 bytes.
992     return 2;
993 
994   case ARM::fixup_arm_pcrel_10_unscaled:
995   case ARM::fixup_arm_ldst_pcrel_12:
996   case ARM::fixup_arm_pcrel_10:
997   case ARM::fixup_arm_pcrel_9:
998   case ARM::fixup_arm_adr_pcrel_12:
999   case ARM::fixup_arm_uncondbl:
1000   case ARM::fixup_arm_condbl:
1001   case ARM::fixup_arm_blx:
1002   case ARM::fixup_arm_condbranch:
1003   case ARM::fixup_arm_uncondbranch:
1004   case ARM::fixup_t2_ldst_pcrel_12:
1005   case ARM::fixup_t2_condbranch:
1006   case ARM::fixup_t2_uncondbranch:
1007   case ARM::fixup_t2_pcrel_10:
1008   case ARM::fixup_t2_adr_pcrel_12:
1009   case ARM::fixup_arm_thumb_bl:
1010   case ARM::fixup_arm_thumb_blx:
1011   case ARM::fixup_arm_movt_hi16:
1012   case ARM::fixup_arm_movw_lo16:
1013   case ARM::fixup_t2_movt_hi16:
1014   case ARM::fixup_t2_movw_lo16:
1015   case ARM::fixup_arm_mod_imm:
1016   case ARM::fixup_t2_so_imm:
1017   case ARM::fixup_bf_branch:
1018   case ARM::fixup_bf_target:
1019   case ARM::fixup_bfl_target:
1020   case ARM::fixup_bfc_target:
1021   case ARM::fixup_bfcsel_else_target:
1022   case ARM::fixup_wls:
1023   case ARM::fixup_le:
1024     // Instruction size is 4 bytes.
1025     return 4;
1026   }
1027 }
1028 
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
1030                                const MCValue &Target,
1031                                MutableArrayRef<char> Data, uint64_t Value,
1032                                bool IsResolved,
1033                                const MCSubtargetInfo* STI) const {
1034   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
1035   MCContext &Ctx = Asm.getContext();
1036   Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
1037   if (!Value)
1038     return; // Doesn't change encoding.
1039 
1040   unsigned Offset = Fixup.getOffset();
1041   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
1042 
1043   // Used to point to big endian bytes.
1044   unsigned FullSizeBytes;
1045   if (Endian == support::big) {
1046     FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
1047     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
1048     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
1049   }
1050 
1051   // For each byte of the fragment that the fixup touches, mask in the bits from
1052   // the fixup value. The Value has been "split up" into the appropriate
1053   // bitfields above.
1054   for (unsigned i = 0; i != NumBytes; ++i) {
1055     unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
1056     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
1057   }
1058 }
1059 
1060 namespace CU {
1061 
1062 /// Compact unwind encoding values.
1063 enum CompactUnwindEncodings {
1064   UNWIND_ARM_MODE_MASK                         = 0x0F000000,
1065   UNWIND_ARM_MODE_FRAME                        = 0x01000000,
1066   UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
1067   UNWIND_ARM_MODE_DWARF                        = 0x04000000,
1068 
1069   UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,
1070 
1071   UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
1072   UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
1073   UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,
1074 
1075   UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
1076   UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
1077   UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
1078   UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
1079   UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,
1080 
1081   UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,
1082 
1083   UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
1084 };
1085 
1086 } // end CU namespace
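
// Illustrative encoding: a standard prologue that pushes {r4-r7, lr} and uses
// r7 as the frame pointer, with no extra stack adjust and no D-register saves,
// would be described as UNWIND_ARM_MODE_FRAME | UNWIND_ARM_FRAME_FIRST_PUSH_R4
// | UNWIND_ARM_FRAME_FIRST_PUSH_R5 | UNWIND_ARM_FRAME_FIRST_PUSH_R6, i.e.
// 0x01000007.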
1087 
1088 /// Generate compact unwind encoding for the function based on the CFI
1089 /// instructions. If the CFI instructions describe a frame that cannot be
1090 /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
1093     ArrayRef<MCCFIInstruction> Instrs) const {
1094   DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
1095   // Only armv7k uses CFI based unwinding.
1096   if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
1097     return 0;
1098   // No .cfi directives means no frame.
1099   if (Instrs.empty())
1100     return 0;
1101   // Start off assuming CFA is at SP+0.
1102   unsigned CFARegister = ARM::SP;
1103   int CFARegisterOffset = 0;
1104   // Mark savable registers as initially unsaved
1105   DenseMap<unsigned, int> RegOffsets;
1106   int FloatRegCount = 0;
1107   // Process each .cfi directive and build up compact unwind info.
1108   for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
1109     unsigned Reg;
1110     const MCCFIInstruction &Inst = Instrs[i];
1111     switch (Inst.getOperation()) {
1112     case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
1113       CFARegisterOffset = -Inst.getOffset();
1114       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1115       break;
1116     case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
1117       CFARegisterOffset = -Inst.getOffset();
1118       break;
1119     case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
1120       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1121       break;
1122     case MCCFIInstruction::OpOffset: // DW_CFA_offset
1123       Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1124       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
1125         RegOffsets[Reg] = Inst.getOffset();
1126       else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1127         RegOffsets[Reg] = Inst.getOffset();
1128         ++FloatRegCount;
1129       } else {
1130         DEBUG_WITH_TYPE("compact-unwind",
1131                         llvm::dbgs() << ".cfi_offset on unknown register="
1132                                      << Inst.getRegister() << "\n");
1133         return CU::UNWIND_ARM_MODE_DWARF;
1134       }
1135       break;
1136     case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1137       // Ignore
1138       break;
1139     default:
      // Directive not convertible to compact unwind, bail out.
1141       DEBUG_WITH_TYPE("compact-unwind",
1142                       llvm::dbgs()
                          << "CFI directive not compatible with compact "
1144                              "unwind encoding, opcode=" << Inst.getOperation()
1145                           << "\n");
1146       return CU::UNWIND_ARM_MODE_DWARF;
1147       break;
1148     }
1149   }
1150 
1151   // If no frame set up, return no unwind info.
1152   if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1153     return 0;
1154 
1155   // Verify standard frame (lr/r7) was used.
1156   if (CFARegister != ARM::R7) {
1157     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1158                                                    << CFARegister
1159                                                    << " instead of r7\n");
1160     return CU::UNWIND_ARM_MODE_DWARF;
1161   }
1162   int StackAdjust = CFARegisterOffset - 8;
1163   if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1164     DEBUG_WITH_TYPE("compact-unwind",
1165                     llvm::dbgs()
1166                         << "LR not saved as standard frame, StackAdjust="
1167                         << StackAdjust
1168                         << ", CFARegisterOffset=" << CFARegisterOffset
1169                         << ", lr save at offset=" << RegOffsets[14] << "\n");
1170     return CU::UNWIND_ARM_MODE_DWARF;
1171   }
1172   if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1173     DEBUG_WITH_TYPE("compact-unwind",
1174                     llvm::dbgs() << "r7 not saved as standard frame\n");
1175     return CU::UNWIND_ARM_MODE_DWARF;
1176   }
1177   uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1178 
1179   // If var-args are used, there may be a stack adjust required.
1180   switch (StackAdjust) {
1181   case 0:
1182     break;
1183   case 4:
1184     CompactUnwindEncoding |= 0x00400000;
1185     break;
1186   case 8:
1187     CompactUnwindEncoding |= 0x00800000;
1188     break;
1189   case 12:
1190     CompactUnwindEncoding |= 0x00C00000;
1191     break;
1192   default:
1193     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1194                                           << ".cfi_def_cfa stack adjust ("
1195                                           << StackAdjust << ") out of range\n");
1196     return CU::UNWIND_ARM_MODE_DWARF;
1197   }
1198 
1199   // If r6 is saved, it must be right below r7.
1200   static struct {
1201     unsigned Reg;
1202     unsigned Encoding;
1203   } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1204                    {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1205                    {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1206                    {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1207                    {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1208                    {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1209                    {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1210                    {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1211 
1212   int CurOffset = -8 - StackAdjust;
1213   for (auto CSReg : GPRCSRegs) {
1214     auto Offset = RegOffsets.find(CSReg.Reg);
1215     if (Offset == RegOffsets.end())
1216       continue;
1217 
1218     int RegOffset = Offset->second;
1219     if (RegOffset != CurOffset - 4) {
1220       DEBUG_WITH_TYPE("compact-unwind",
1221                       llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1222                                    << RegOffset << " but only supported at "
1223                                    << CurOffset << "\n");
1224       return CU::UNWIND_ARM_MODE_DWARF;
1225     }
1226     CompactUnwindEncoding |= CSReg.Encoding;
1227     CurOffset -= 4;
1228   }
1229 
1230   // If no floats saved, we are done.
1231   if (FloatRegCount == 0)
1232     return CompactUnwindEncoding;
1233 
1234   // Switch mode to include D register saving.
1235   CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1236   CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1237 
1238   // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1239   // but needs coordination with the linker and libunwind.
1240   if (FloatRegCount > 4) {
1241     DEBUG_WITH_TYPE("compact-unwind",
1242                     llvm::dbgs() << "unsupported number of D registers saved ("
1243                                  << FloatRegCount << ")\n");
1244       return CU::UNWIND_ARM_MODE_DWARF;
1245   }
1246 
1247   // Floating point registers must either be saved sequentially, or we defer to
1248   // DWARF. No gaps allowed here so check that each saved d-register is
1249   // precisely where it should be.
1250   static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1251   for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1252     auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1253     if (Offset == RegOffsets.end()) {
1254       DEBUG_WITH_TYPE("compact-unwind",
1255                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1256                                    << MRI.getName(FPRCSRegs[Idx])
1257                                    << " not saved\n");
1258       return CU::UNWIND_ARM_MODE_DWARF;
1259     } else if (Offset->second != CurOffset - 8) {
1260       DEBUG_WITH_TYPE("compact-unwind",
1261                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1262                                    << MRI.getName(FPRCSRegs[Idx])
1263                                    << " saved at " << Offset->second
1264                                    << ", expected at " << CurOffset - 8
1265                                    << "\n");
1266       return CU::UNWIND_ARM_MODE_DWARF;
1267     }
1268     CurOffset -= 8;
1269   }
1270 
1271   return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1272 }
1273 
static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
1275   ARM::ArchKind AK = ARM::parseArch(Arch);
1276   switch (AK) {
1277   default:
1278     return MachO::CPU_SUBTYPE_ARM_V7;
1279   case ARM::ArchKind::ARMV4T:
1280     return MachO::CPU_SUBTYPE_ARM_V4T;
1281   case ARM::ArchKind::ARMV5T:
1282   case ARM::ArchKind::ARMV5TE:
1283   case ARM::ArchKind::ARMV5TEJ:
1284     return MachO::CPU_SUBTYPE_ARM_V5;
1285   case ARM::ArchKind::ARMV6:
1286   case ARM::ArchKind::ARMV6K:
1287     return MachO::CPU_SUBTYPE_ARM_V6;
1288   case ARM::ArchKind::ARMV7A:
1289     return MachO::CPU_SUBTYPE_ARM_V7;
1290   case ARM::ArchKind::ARMV7S:
1291     return MachO::CPU_SUBTYPE_ARM_V7S;
1292   case ARM::ArchKind::ARMV7K:
1293     return MachO::CPU_SUBTYPE_ARM_V7K;
1294   case ARM::ArchKind::ARMV6M:
1295     return MachO::CPU_SUBTYPE_ARM_V6M;
1296   case ARM::ArchKind::ARMV7M:
1297     return MachO::CPU_SUBTYPE_ARM_V7M;
1298   case ARM::ArchKind::ARMV7EM:
1299     return MachO::CPU_SUBTYPE_ARM_V7EM;
1300   }
1301 }
1302 
static MCAsmBackend *createARMAsmBackend(const Target &T,
1304                                          const MCSubtargetInfo &STI,
1305                                          const MCRegisterInfo &MRI,
1306                                          const MCTargetOptions &Options,
1307                                          support::endianness Endian) {
1308   const Triple &TheTriple = STI.getTargetTriple();
1309   switch (TheTriple.getObjectFormat()) {
1310   default:
1311     llvm_unreachable("unsupported object format");
1312   case Triple::MachO: {
1313     MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
1314     return new ARMAsmBackendDarwin(T, STI, MRI, CS);
1315   }
1316   case Triple::COFF:
1317     assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1318     return new ARMAsmBackendWinCOFF(T, STI);
1319   case Triple::ELF:
1320     assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1321     uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1322     return new ARMAsmBackendELF(T, STI, OSABI, Endian);
1323   }
1324 }
1325 
MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1327                                           const MCSubtargetInfo &STI,
1328                                           const MCRegisterInfo &MRI,
1329                                           const MCTargetOptions &Options) {
1330   return createARMAsmBackend(T, STI, MRI, Options, support::little);
1331 }
1332 
MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1334                                           const MCSubtargetInfo &STI,
1335                                           const MCRegisterInfo &MRI,
1336                                           const MCTargetOptions &Options) {
1337   return createARMAsmBackend(T, STI, MRI, Options, support::big);
1338 }
1339