//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
class ARM final : public TargetInfo {
public:
  ARM();
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void addPltSymbols(InputSection &isec, uint64_t off) const override;
  void addPltHeaderSymbols(InputSection &isd) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
};
} // namespace

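// Configure the dynamic relocation types, PLT geometry and padding used by
// the target-independent parts of the linker for 32-bit ARM.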
ARM::ARM() {
  copyRel = R_ARM_COPY;
  relativeRel = R_ARM_RELATIVE;
  iRelativeRel = R_ARM_IRELATIVE;
  gotRel = R_ARM_GLOB_DAT;
  pltRel = R_ARM_JUMP_SLOT;
  symbolicRel = R_ARM_ABS32;
  tlsGotRel = R_ARM_TLS_TPOFF32;
  tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  tlsOffsetRel = R_ARM_TLS_DTPOFF32;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
  needsThunks = true;
  defaultMaxPageSize = 65536;
}

uint32_t ARM::calcEFlags() const {
  // The ABIFloatType is used by loaders to detect the floating point calling
  // convention.
  uint32_t abiFloatType = 0;
  if (config->armVFPArgs == ARMVFPArgKind::Base ||
      config->armVFPArgs == ARMVFPArgKind::Default)
    abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
    abiFloatType = EF_ARM_ABI_FLOAT_HARD;

  // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
  // but we don't have any firm guarantees of conformance. Linux AArch64
  // kernels (as of 2016) require an EABI version to be set.
  return EF_ARM_EABI_VER5 | abiFloatType;
}

RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
                        const uint8_t *loc) const {
  switch (type) {
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_SBREL32:
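    // ((S + A) | T) - B(S), an offset from the static base B(S)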
    return R_ARM_SBREL;
  case R_ARM_TARGET1:
    return config->target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (config->target2 == Target2Policy::Rel)
      return R_PC;
    if (config->target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_TLS_LDO32:
    return R_DTPREL;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) is assumed to be .got; this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_ALU_PC_G0:
  case R_ARM_LDR_PC_G0:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_PC8:
  case R_ARM_THM_PC12:
    return R_ARM_PCA;
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVW_BREL:
  case R_ARM_MOVT_BREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVW_BREL:
  case R_ARM_THM_MOVT_BREL:
    return R_ARM_SBREL;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TPREL;
  case R_ARM_V4BX:
    // V4BX is just a marker to indicate there's a "bx rN" instruction at the
    // given address. It can be used to implement a special linker mode which
    // rewrites ARMv4T inputs to ARMv4. Since we support only ARMv4T input and
    // not ARMv4 output, we can just ignore it.
    return R_NONE;
  default:
    return R_ABS;
  }
}

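// Returns the dynamic relocation that can represent the given absolute
// relocation type in the output, or R_ARM_NONE if it must be resolved
// statically at link time.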
RelType ARM::getDynRel(RelType type) const {
  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
    return R_ARM_ABS32;
  return R_ARM_NONE;
}

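// A .got.plt entry initially points at the PLT header so that the first call
// through it goes through the lazy-resolution stub.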
void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write32le(buf, in.plt->getVA());
}

void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(buf, s.getVA());
}

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
  const uint8_t pltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4};
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t l1 = in.plt->getVA() + 8;
  write32le(buf + 16, gotPlt - l1 - 8);
}

// The default PLT header requires the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
  // Use a similar sequence to that in writePlt(); the difference is that the
  // calling convention means we use lr instead of ip. The PLT entry is
  // responsible for saving lr on the stack; the dynamic loader is responsible
  // for reloading it.
  const uint32_t pltData[] = {
      0xe52de004, // L1: str lr, [sp,#-4]!
      0xe28fe600, //     add lr, pc,  #0x0NN00000 &(.got.plt - L1 - 4)
      0xe28eea00, //     add lr, lr,  #0x000NN000 &(.got.plt - L1 - 4)
      0xe5bef000, //     ldr pc, [lr, #0x00000NNN] &(.got.plt - L1 - 4)
  };

  uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset; use the long form.
    writePltHeaderLong(buf);
    return;
  }
  write32le(buf + 0, pltData[0]);
  write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
  write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
  write32le(buf + 12, pltData[3] | (offset & 0xfff));
  memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
  memcpy(buf + 20, trapInstr.data(), 4);
  memcpy(buf + 24, trapInstr.data(), 4);
  memcpy(buf + 28, trapInstr.data(), 4);
}

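// Add mapping symbols for the PLT header: $a marks the start of ARM code and
// $d marks the start of data (the literal word and trap padding), so
// disassemblers can tell code from data.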
void ARM::addPltHeaderSymbols(InputSection &isec) const {
  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}

// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
                         uint64_t pltEntryAddr) {
  const uint8_t pltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word   Offset(&(.got.plt) - L1 - 8)
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t l1 = pltEntryAddr + 4;
  write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}

// The default PLT entries require the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                   uint64_t pltEntryAddr) const {
  // The PLT entry is similar to the example given in Appendix A of ELF for
  // the Arm Architecture. Instead of using the Group Relocations to find the
  // optimal rotation for the 8-bit immediate used in the add instructions we
  // hard code the most compact rotations for simplicity. This saves a load
  // instruction over the long plt sequences.
  const uint32_t pltData[] = {
      0xe28fc600, // L1: add ip, pc,  #0x0NN00000  Offset(&(.got.plt) - L1 - 8)
      0xe28cca00, //     add ip, ip,  #0x000NN000  Offset(&(.got.plt) - L1 - 8)
      0xe5bcf000, //     ldr pc, [ip, #0x00000NNN] Offset(&(.got.plt) - L1 - 8)
  };

  uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset; use the long form.
    writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
    return;
  }
  write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
  write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
  write32le(buf + 8, pltData[2] | (offset & 0xfff));
  memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}

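// Each PLT entry is laid out as ARM code ($a) followed by a word of trap
// padding ($d) at offset 12.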
void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
  addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}

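// Decide whether the branch described by (type, branchAddr, s, a) needs a
// thunk, either to interwork between ARM and Thumb state or to extend the
// branch range.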
bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                     uint64_t branchAddr, const Symbol &s,
                     int64_t a) const {
  // If S is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction.
  if (s.isUndefWeak() && !s.isInPlt())
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb).
    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM).
    if (expr == R_PLT_PC || (s.isFunc() && (s.getVA() & 1) == 0))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_THM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  }
  return false;
}

uint32_t ARM::getThunkSectionSpacing() const {
  // The placing of pre-created ThunkSections is controlled by the value
  // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
  // place the ThunkSection such that all branches from the InputSections
  // prior to the ThunkSection can reach a Thunk placed at the end of the
  // ThunkSection. Graphically:
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |

  // Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
  // is to match the most common expected case of a Thumb 2 encoded BL, BLX or
  // B.W:
  // ARM B, BL, BLX range +/- 32MiB
  // Thumb B.W, BL, BLX range +/- 16MiB
  // Thumb B<cc>.W range +/- 1MiB
  // If a branch cannot reach a pre-created ThunkSection a new one will be
  // created so we can handle the rare cases of a Thumb 2 conditional branch.
  // We intentionally use a lower size for thunkSectionSpacing than the maximum
  // branch range so the end of the ThunkSection is more likely to be within
  // range of the branch instruction that is furthest away. The value we shorten
  // thunkSectionSpacing by is set conservatively to allow us to create 16,384
  // 12 byte Thunks at any offset in a ThunkSection without risk of a branch to
  // one of the Thunks going out of range.

  // On Arm the thunkSectionSpacing depends on the range of the Thumb branch
  // instructions. On earlier architectures such as ARMv4, ARMv5 and ARMv6
  // (except ARMv6T2) the range is +/- 4MiB.

  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                         : 0x400000 - 0x7500;
}

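// Returns true if a branch or call of the given relocation type from src can
// reach dst directly, without a range-extension thunk.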
bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  if ((dst & 0x1) == 0)
    // Destination is ARM. If the caller is ARM then Src is already 4-byte
    // aligned. If the caller is Thumb (BLX) the Src address has the bottom
    // 2 bits cleared to ensure the destination will be 4-byte aligned.
    src &= ~0x3;
  else
    // Bit 0 == 1 denotes Thumb state; it is not part of the range.
    dst &= ~0x1;

  int64_t offset = dst - src;
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
  case R_ARM_CALL:
    return llvm::isInt<26>(offset);
  case R_ARM_THM_JUMP19:
    return llvm::isInt<21>(offset);
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return config->armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
                                         : llvm::isInt<23>(offset);
  default:
    return true;
  }
}

// Helper to produce message text when LLD detects that a CALL relocation to
// a non STT_FUNC symbol may result in incorrect interworking between ARM and
// Thumb.
static void stateChangeWarning(uint8_t *loc, RelType relt, const Symbol &s) {
  assert(!s.isFunc());
  if (s.isSection()) {
    // Section symbols must be defined and in a section. Users cannot change
    // the type. Use the section name as getName() returns an empty string.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to STT_SECTION symbol " +
         cast<Defined>(s).section->name + " ; interworking not performed");
  } else {
    // Warn with hint on how to alter the symbol type.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to non STT_FUNC symbol: " + s.getName() +
         " interworking not performed; consider using directive '.type " +
         s.getName() +
         ", %function' to give symbol type STT_FUNC if"
         " interworking between ARM and Thumb is required");
  }
}


// Utility functions taken from ARMAddressingModes.h; the only changes are LLD
// coding style.

// Rotate a 32-bit unsigned value right by a specified amt of bits.
static uint32_t rotr32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val >> amt) | (val << ((32 - amt) & 31));
}

// Rotate a 32-bit unsigned value left by a specified amt of bits.
static uint32_t rotl32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val << amt) | (val >> ((32 - amt) & 31));
}

// Try to encode a 32-bit unsigned immediate imm with an immediate shifter
// operand; this form is an 8-bit immediate rotated right by an even number of
// bits. We compute the rotate amount to use. If this immediate value cannot be
// handled with a single shifter-op, determine a good rotate amount that will
// take a maximal chunk of bits out of the immediate.
static uint32_t getSOImmValRotate(uint32_t imm) {
  // 8-bit (or less) immediates are trivially shifter_operands with a rotate
  // of zero.
  if ((imm & ~255U) == 0)
    return 0;

  // Use CTZ to compute the rotate amount.
  unsigned tz = llvm::countTrailingZeros(imm);

  // Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
  // not 9.
  unsigned rotAmt = tz & ~1;

  // If we can handle this spread, return it.
  if ((rotr32(imm, rotAmt) & ~255U) == 0)
    return (32 - rotAmt) & 31; // HW rotates right, not left.

  // For values like 0xF000000F, we should ignore the low 6 bits, then
  // retry the hunt.
  if (imm & 63U) {
    unsigned tz2 = countTrailingZeros(imm & ~63U);
    unsigned rotAmt2 = tz2 & ~1;
    if ((rotr32(imm, rotAmt2) & ~255U) == 0)
      return (32 - rotAmt2) & 31; // HW rotates right, not left.
  }

  // Otherwise, we have no way to cover this span of bits with a single
  // shifter_op immediate. Return a chunk of bits that will be useful to
  // handle.
  return (32 - rotAmt) & 31; // HW rotates right, not left.
}

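// Applies the relocation rel, whose final computed value is val, to the
// instruction or data bytes at loc.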
void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_TPOFF32:
  case R_ARM_TLS_DTPOFF32:
    write32le(loc, val);
    break;
  case R_ARM_PREL31:
    checkInt(loc, val, 31, rel);
    write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
    break;
  case R_ARM_CALL: {
    // R_ARM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the
    // value of bit 0 of Val, with bit 0 == 1 denoting Thumb. If the symbol is
    // not of type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we don't need to interwork.
    assert(rel.sym); // R_ARM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read32le(loc) & 0xfe000000) == 0xfa000000;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC.
    if (!rel.sym->isFunc() && isBlx != bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() ? bit0Thumb : isBlx) {
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt(loc, val, 26, rel);
      write32le(loc, 0xfa000000 |                    // opcode
                         ((val & 2) << 23) |         // H
                         ((val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    // BLX (always unconditional) instruction to an ARM target; select an
    // unconditional BL.
    write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
    // fall through as BL encoding is shared with B
  }
    LLVM_FALLTHROUGH;
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt(loc, val, 26, rel);
    write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP11:
    checkInt(loc, val, 12, rel);
    write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt(loc, val, 21, rel);
    write16le(loc,
              (read16le(loc) & 0xfbc0) |   // opcode cond
                  ((val >> 10) & 0x0400) | // S
                  ((val >> 12) & 0x003f)); // imm6
    write16le(loc + 2,
              0x8000 |                    // opcode
                  ((val >> 8) & 0x0800) | // J2
                  ((val >> 5) & 0x2000) | // J1
                  ((val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL: {
    // R_ARM_THM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the
    // value of bit 0 of Val, with bit 0 == 0 denoting ARM. If the symbol is
    // not of type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we need to interwork.
    assert(rel.sym); // R_ARM_THM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read16le(loc + 2) & 0x1000) == 0;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC. PLT entries generated by LLD are always ARM.
    if (!rel.sym->isFunc() && !rel.sym->isInPlt() && isBlx == bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() || rel.sym->isInPlt() ? !bit0Thumb : isBlx) {
      // We are writing a BLX. Ensure the BLX destination is 4-byte aligned;
      // the BLX instruction itself may only be 2-byte aligned. This must be
      // done before the overflow check.
      val = alignTo(val, 4);
      write16le(loc + 2, read16le(loc + 2) & ~0x1000);
    } else {
      write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | 1 << 12);
    }
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      checkInt(loc, val, 23, rel);
      write16le(loc,
                0xf000 |                     // opcode
                    ((val >> 12) & 0x07ff)); // imm11
      write16le(loc + 2,
                (read16le(loc + 2) & 0xd000) | // opcode
                    0x2800 |                   // J1 == J2 == 1
                    ((val >> 1) & 0x07ff));    // imm11
      break;
    }
  }
    // Fall through as rest of encoding is the same as B.W
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24:
    // Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
    checkInt(loc, val, 25, rel);
    write16le(loc,
              0xf000 |                     // opcode
                  ((val >> 14) & 0x0400) | // S
                  ((val >> 12) & 0x03ff)); // imm10
    write16le(loc + 2,
              (read16le(loc + 2) & 0xd000) |                  // opcode
                  (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
                  (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
                  ((val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVW_BREL_NC:
    write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
                       (val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVT_BREL:
    write32le(loc, (read32le(loc) & ~0x000f0fff) |
                       (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVT_BREL:
    // Encoding T1: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf2c0 |                     // opcode
                  ((val >> 17) & 0x0400) | // i
                  ((val >> 28) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val >> 12) & 0x7000) |   // imm3
                  ((val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVW_BREL_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf240 |                     // opcode
                  ((val >> 1) & 0x0400) |  // i
                  ((val >> 12) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val << 4) & 0x7000) |    // imm3
                  (val & 0x00ff));           // imm8
    break;
  case R_ARM_ALU_PC_G0: {
    // ADR (literal) add = bit23, sub = bit22
    // literal is a 12-bit modified immediate, made up of a 4-bit even rotate
    // right and an 8-bit immediate. The code-sequence here is derived from
    // ARMAddressingModes.h in llvm/Target/ARM/MCTargetDesc. In our case we
    // want to give an error if we cannot encode the constant.
    uint32_t opcode = 0x00800000;
    if (val >> 63) {
      opcode = 0x00400000;
      val = ~val + 1;
    }
    if ((val & ~255U) != 0) {
      uint32_t rotAmt = getSOImmValRotate(val);
      // Error if we cannot encode this with a single shift
      if (rotr32(~255U, rotAmt) & val)
        error(getErrorLocation(loc) + "unencodeable immediate " +
              Twine(val).str() + " for relocation " + toString(rel.type));
      val = rotl32(val, rotAmt) | ((rotAmt >> 1) << 8);
    }
    write32le(loc, (read32le(loc) & 0xff0ff000) | opcode | val);
    break;
  }
  case R_ARM_LDR_PC_G0: {
    // R_ARM_LDR_PC_G0 is S + A - P, we have ((S + A) | T) - P, if S is a
    // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
    // bottom bit to recover S + A - P.
    if (rel.sym->isFunc())
      val &= ~0x1;
    // LDR (literal) u = bit23
    int64_t imm = val;
    uint32_t u = 0x00800000;
    if (imm < 0) {
      imm = -imm;
      u = 0;
    }
    checkUInt(loc, imm, 12, rel);
    write32le(loc, (read32le(loc) & 0xff7ff000) | u | imm);
    break;
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    int64_t imm = val;
    uint16_t sub = 0;
    if (imm < 0) {
      imm = -imm;
      sub = 0x00a0;
    }
    checkUInt(loc, imm, 12, rel);
    write16le(loc, (read16le(loc) & 0xfb0f) | sub | (imm & 0x800) >> 1);
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | (imm & 0x700) << 4 | (imm & 0xff));
    break;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR literal encoding T1 positive offset only imm8:00
    // R_ARM_THM_PC8 is S + A - Pa, we have ((S + A) | T) - Pa, if S is a
    // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
    // bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    checkUInt(loc, val, 10, rel);
    checkAlignment(loc, val, 4, rel);
    write16le(loc, (read16le(loc) & 0xff00) | (val & 0x3fc) >> 2);
    break;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    // imm12 is unsigned
    // R_ARM_THM_PC12 is S + A - Pa, we have ((S + A) | T) - Pa, if S is a
    // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
    // bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    int64_t imm12 = val;
    uint16_t u = 0x0080;
    if (imm12 < 0) {
      imm12 = -imm12;
      u = 0;
    }
    checkUInt(loc, imm12, 12, rel);
    write16le(loc, read16le(loc) | u);
    write16le(loc + 2, (read16le(loc + 2) & 0xf000) | imm12);
    break;
  }
  default:
    error(getErrorLocation(loc) + "unrecognized relocation " +
          toString(rel.type));
  }
}

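// Reads the addend encoded in the instruction or data at buf for a REL-format
// relocation of the given type.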
int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GLOB_DAT:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_IRELATIVE:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_DTPMOD32:
  case R_ARM_TLS_DTPOFF32:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_TPOFF32:
    return SignExtend64<32>(read32le(buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(buf) << 2);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<20>(((hi & 0x0400) << 10) | // S
                            ((lo & 0x0800) << 8) |  // J2
                            ((lo & 0x2000) << 5) |  // J1
                            ((hi & 0x003f) << 12) | // imm6
                            ((lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      uint16_t hi = read16le(buf);
      uint16_t lo = read16le(buf + 2);
      return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
                              ((lo & 0x7ff) << 1));  // imm11:0
    }
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<24>(((hi & 0x0400) << 14) |                    // S
                            (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
                            (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
                            ((hi & 0x003ff) << 12) |                   // imm10
                            ((lo & 0x007ff) << 1));                    // imm11:0
  }
  // Per ELF for the ARM Architecture 4.6.1.1, the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768.
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVT_BREL: {
    uint64_t val = read32le(buf) & 0x000f0fff;
    return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVT_BREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
                            ((hi & 0x0400) << 1) |  // i
                            ((lo & 0x7000) >> 4) |  // imm3
                            (lo & 0x00ff));         // imm8
  }
  case R_ARM_ALU_PC_G0: {
    // 12-bit immediate is a modified immediate made up of a 4-bit even
    // right rotation and 8-bit constant. After the rotation the value
    // is zero-extended. When bit 23 is set the instruction is an add, when
    // bit 22 is set it is a sub.
    uint32_t instr = read32le(buf);
    uint32_t val = rotr32(instr & 0xff, ((instr & 0xf00) >> 8) * 2);
    return (instr & 0x00400000) ? -val : val;
  }
  case R_ARM_LDR_PC_G0: {
    // ADR (literal) add = bit23, sub = bit22
    // LDR (literal) u = bit23 unsigned imm12
    bool u = read32le(buf) & 0x00800000;
    uint32_t imm12 = read32le(buf) & 0xfff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // Thumb2 ADR, which is an alias for a sub or add instruction with an
    // unsigned immediate.
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    uint64_t imm = (hi & 0x0400) << 1 | // i
                   (lo & 0x7000) >> 4 | // imm3
                   (lo & 0x00ff);       // imm8
    // For sub, addend is negative, add is positive.
    return (hi & 0x00f0) ? -imm : imm;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1
    // From ELF for the ARM Architecture the initial signed addend is formed
    // from an unsigned field using expression (((imm8:00 + 4) & 0x3ff) - 4);
    // this trick permits the PC bias of -4 to be encoded using imm8 = 0xff.
    return ((((read16le(buf) & 0xff) << 2) + 4) & 0x3ff) - 4;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    bool u = read16le(buf) & 0x0080;
    uint64_t imm12 = read16le(buf + 2) & 0x0fff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_NONE:
  case R_ARM_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  }
}

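// Returns the singleton TargetInfo for the 32-bit ARM target.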
TargetInfo *elf::getARMTargetInfo() {
  static ARM target;
  return &target;
}
