1 //=== aarch64.h - Generic JITLink aarch64 edge kinds, utilities -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Generic utilities for graphs representing aarch64 objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
14 #define LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
15 
16 #include "TableManager.h"
17 #include "llvm/ExecutionEngine/JITLink/JITLink.h"
18 #include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
19 
20 namespace llvm {
21 namespace jitlink {
22 namespace aarch64 {
23 
/// Edge kinds for aarch64 link graphs. See applyFixup below for how each
/// kind is encoded into block content.
enum EdgeKind_aarch64 : Edge::Kind {
  /// 26-bit PC-relative branch immediate (B / BL instructions).
  Branch26 = Edge::FirstRelocation,
  /// 32-bit absolute pointer; errors out if target + addend exceeds 32 bits.
  Pointer32,
  /// 64-bit absolute pointer.
  Pointer64,
  /// 64-bit absolute pointer to an anonymous target (fixed up identically
  /// to Pointer64).
  Pointer64Anon,
  /// 21-bit PC-relative 4KiB-page delta for ADRP instructions.
  Page21,
  /// Low 12 bits of the target address, encoded into the scaled imm12
  /// field of a load/store instruction.
  PageOffset12,
  /// 16-bit chunk of the target address for MOVK/MOVZ instructions; the
  /// chunk is selected by the instruction's hw (LSL #shift) field.
  MoveWide16,
  /// GOT variant of Page21; lowered to Page21 by GOTTableManager.
  GOTPage21,
  /// GOT variant of PageOffset12; lowered to PageOffset12 by
  /// GOTTableManager.
  GOTPageOffset12,
  /// TLV variant of Page21; lowered to Page21 by GOTTableManager.
  TLVPage21,
  /// TLV variant of PageOffset12; lowered to PageOffset12 by
  /// GOTTableManager.
  TLVPageOffset12,
  /// TLSDesc page edge; not lowered here — applyFixup reports an error if
  /// one survives to fixup time.
  TLSDescPage21,
  /// TLSDesc page-offset edge; same handling as TLSDescPage21.
  TLSDescPageOffset12,
  /// 32-bit delta to a GOT entry; lowered to Delta32 by GOTTableManager.
  Delta32ToGOT,
  /// Addend carried by a paired relocation. Not applied by applyFixup —
  /// presumably consumed while building the graph; confirm in the
  /// object-format backends.
  PairedAddend,
  /// 19-bit PC-relative literal-load immediate (LDR Xt, <label>); used by
  /// PLTTableManager stubs to load from GOT entries.
  LDRLiteral19,
  /// 32-bit delta: target - fixup-address + addend.
  Delta32,
  /// 64-bit delta: target - fixup-address + addend.
  Delta64,
  /// 32-bit negative delta: fixup-address - target + addend.
  NegDelta32,
  /// 64-bit negative delta: fixup-address - target + addend.
  NegDelta64,
};

/// Returns a string name for the given aarch64 edge. For debugging
/// purposes only.
const char *getEdgeKindName(Edge::Kind K);
50 
// Tests whether Instr encodes a load/store instruction with an unsigned
// 12-bit immediate offset (LD/ST imm12 class).
inline bool isLoadStoreImm12(uint32_t Instr) {
  constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
  constexpr uint32_t LoadStoreImm12Bits = 0x39000000;
  return (Instr & LoadStoreImm12Mask) == LoadStoreImm12Bits;
}

// Returns the right-shift to apply to an address operand before encoding
// it in the imm12 field of a LD/ST (imm12) instruction.
//
// The immediate is implicitly scaled by the access size, so the shift is
// the log2 of that size: e.g. a halfword load needs the offset shifted
// right by 1. Non-LD/ST-imm12 instructions get a shift of 0.
inline unsigned getPageOffset12Shift(uint32_t Instr) {
  if (!isLoadStoreImm12(Instr))
    return 0;

  // The size field (bits 31:30) holds the log2 of the access size.
  unsigned Shift = Instr >> 30;
  if (Shift == 0) {
    // A size field of zero may still denote a 128-bit vector access,
    // identified by the opc<1>/V bits below.
    constexpr uint32_t Vec128Mask = 0x04800000;
    if ((Instr & Vec128Mask) == Vec128Mask)
      Shift = 4;
  }

  return Shift;
}
77 
// Tests whether Instr is a MOVK/MOVZ (imm16) instruction whose 16-bit
// immediate field is all zeroes (i.e. ready to receive a fixup value).
inline bool isMoveWideImm16(uint32_t Instr) {
  constexpr uint32_t MoveWideImm16Mask = 0x5f9fffe0;
  constexpr uint32_t MoveWideImm16Bits = 0x52800000;
  return (Instr & MoveWideImm16Mask) == MoveWideImm16Bits;
}

// Returns the right-shift to apply to an address operand before encoding
// it in the imm16 field of a MOVK/MOVZ (imm16) instruction.
//
// The shift is specified in assembly as LSL #<shift> and encoded in the
// hw field (bits 22:21) in multiples of 16. Non-MOVK/MOVZ instructions
// get a shift of 0.
inline unsigned getMoveWide16Shift(uint32_t Instr) {
  if (!isMoveWideImm16(Instr)) {
    return 0;
  }

  unsigned HWField = (Instr >> 21) & 0b11;
  return HWField * 16;
}
96 
97 /// Apply fixup expression for edge to block content.
98 inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
99   using namespace support;
100 
101   char *BlockWorkingMem = B.getAlreadyMutableContent().data();
102   char *FixupPtr = BlockWorkingMem + E.getOffset();
103   orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
104 
105   switch (E.getKind()) {
106   case Branch26: {
107     assert((FixupAddress.getValue() & 0x3) == 0 &&
108            "Branch-inst is not 32-bit aligned");
109 
110     int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
111 
112     if (static_cast<uint64_t>(Value) & 0x3)
113       return make_error<JITLinkError>("Branch26 target is not 32-bit "
114                                       "aligned");
115 
116     if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
117       return makeTargetOutOfRangeError(G, B, E);
118 
119     uint32_t RawInstr = *(little32_t *)FixupPtr;
120     assert((RawInstr & 0x7fffffff) == 0x14000000 &&
121            "RawInstr isn't a B or BR immediate instruction");
122     uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
123     uint32_t FixedInstr = RawInstr | Imm;
124     *(little32_t *)FixupPtr = FixedInstr;
125     break;
126   }
127   case Pointer32: {
128     uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
129     if (Value > std::numeric_limits<uint32_t>::max())
130       return makeTargetOutOfRangeError(G, B, E);
131     *(ulittle32_t *)FixupPtr = Value;
132     break;
133   }
134   case Pointer64:
135   case Pointer64Anon: {
136     uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
137     *(ulittle64_t *)FixupPtr = Value;
138     break;
139   }
140   case Page21: {
141     assert((E.getKind() != GOTPage21 || E.getAddend() == 0) &&
142            "GOTPAGE21 with non-zero addend");
143     uint64_t TargetPage =
144         (E.getTarget().getAddress().getValue() + E.getAddend()) &
145         ~static_cast<uint64_t>(4096 - 1);
146     uint64_t PCPage =
147         FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);
148 
149     int64_t PageDelta = TargetPage - PCPage;
150     if (!isInt<33>(PageDelta))
151       return makeTargetOutOfRangeError(G, B, E);
152 
153     uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
154     assert((RawInstr & 0xffffffe0) == 0x90000000 &&
155            "RawInstr isn't an ADRP instruction");
156     uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
157     uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
158     uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
159     *(ulittle32_t *)FixupPtr = FixedInstr;
160     break;
161   }
162   case PageOffset12: {
163     uint64_t TargetOffset =
164         (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;
165 
166     uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
167     unsigned ImmShift = getPageOffset12Shift(RawInstr);
168 
169     if (TargetOffset & ((1 << ImmShift) - 1))
170       return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
171 
172     uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
173     uint32_t FixedInstr = RawInstr | EncodedImm;
174     *(ulittle32_t *)FixupPtr = FixedInstr;
175     break;
176   }
177   case MoveWide16: {
178     uint64_t TargetOffset =
179         (E.getTarget().getAddress() + E.getAddend()).getValue();
180 
181     uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
182     assert(isMoveWideImm16(RawInstr) &&
183            "RawInstr isn't a MOVK/MOVZ instruction");
184 
185     unsigned ImmShift = getMoveWide16Shift(RawInstr);
186     uint32_t Imm = (TargetOffset >> ImmShift) & 0xffff;
187     uint32_t FixedInstr = RawInstr | (Imm << 5);
188     *(ulittle32_t *)FixupPtr = FixedInstr;
189     break;
190   }
191   case LDRLiteral19: {
192     assert((FixupAddress.getValue() & 0x3) == 0 && "LDR is not 32-bit aligned");
193     assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
194     uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
195     assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
196     int64_t Delta = E.getTarget().getAddress() - FixupAddress;
197     if (Delta & 0x3)
198       return make_error<JITLinkError>("LDR literal target is not 32-bit "
199                                       "aligned");
200     if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
201       return makeTargetOutOfRangeError(G, B, E);
202 
203     uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
204     uint32_t FixedInstr = RawInstr | EncodedImm;
205     *(ulittle32_t *)FixupPtr = FixedInstr;
206     break;
207   }
208   case Delta32:
209   case Delta64:
210   case NegDelta32:
211   case NegDelta64: {
212     int64_t Value;
213     if (E.getKind() == Delta32 || E.getKind() == Delta64)
214       Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
215     else
216       Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
217 
218     if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
219       if (Value < std::numeric_limits<int32_t>::min() ||
220           Value > std::numeric_limits<int32_t>::max())
221         return makeTargetOutOfRangeError(G, B, E);
222       *(little32_t *)FixupPtr = Value;
223     } else
224       *(little64_t *)FixupPtr = Value;
225     break;
226   }
227   case TLVPage21:
228   case TLVPageOffset12:
229   case TLSDescPage21:
230   case TLSDescPageOffset12:
231   case GOTPage21:
232   case GOTPageOffset12:
233   case Delta32ToGOT: {
234     return make_error<JITLinkError>(
235         "In graph " + G.getName() + ", section " + B.getSection().getName() +
236         "GOT/TLV edge kinds not lowered: " + getEdgeKindName(E.getKind()));
237   }
238   default:
239     return make_error<JITLinkError>(
240         "In graph " + G.getName() + ", section " + B.getSection().getName() +
241         "unsupported edge kind" + getEdgeKindName(E.getKind()));
242   }
243 
244   return Error::success();
245 }
246 
/// AArch64 null pointer content.
/// Zero-filled initial content for GOT entries; the real target address is
/// written via the Pointer64 edge added in GOTTableManager::createEntry.
/// (Defined out-of-line.)
extern const uint8_t NullGOTEntryContent[8];

/// AArch64 PLT stub content.
/// Instruction bytes for a PLT stub; PLTTableManager::createEntry attaches
/// an LDRLiteral19 edge at offset 0, and applyFixup expects the first
/// instruction to be a 64-bit LDR literal (0x58000010). (Defined
/// out-of-line.)
extern const uint8_t StubContent[8];
252 
253 /// Global Offset Table Builder.
254 class GOTTableManager : public TableManager<GOTTableManager> {
255 public:
256   static StringRef getSectionName() { return "$__GOT"; }
257 
258   bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
259     Edge::Kind KindToSet = Edge::Invalid;
260     const char *BlockWorkingMem = B->getContent().data();
261     const char *FixupPtr = BlockWorkingMem + E.getOffset();
262 
263     switch (E.getKind()) {
264     case aarch64::GOTPage21:
265     case aarch64::TLVPage21: {
266       KindToSet = aarch64::Page21;
267       break;
268     }
269     case aarch64::GOTPageOffset12:
270     case aarch64::TLVPageOffset12: {
271       KindToSet = aarch64::PageOffset12;
272       uint32_t RawInstr = *(const support::ulittle32_t *)FixupPtr;
273       (void)RawInstr;
274       assert(E.getAddend() == 0 &&
275              "GOTPageOffset12/TLVPageOffset12 with non-zero addend");
276       assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
277              "RawInstr isn't a 64-bit LDR immediate");
278       break;
279     }
280     case aarch64::Delta32ToGOT: {
281       KindToSet = aarch64::Delta32;
282       break;
283     }
284     default:
285       return false;
286     }
287     assert(KindToSet != Edge::Invalid &&
288            "Fell through switch, but no new kind to set");
289     DEBUG_WITH_TYPE("jitlink", {
290       dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
291              << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
292              << formatv("{0:x}", E.getOffset()) << ")\n";
293     });
294     E.setKind(KindToSet);
295     E.setTarget(getEntryForTarget(G, E.getTarget()));
296     return true;
297   }
298 
299   Symbol &createEntry(LinkGraph &G, Symbol &Target) {
300     auto &GOTEntryBlock = G.createContentBlock(
301         getGOTSection(G), getGOTEntryBlockContent(), orc::ExecutorAddr(), 8, 0);
302     GOTEntryBlock.addEdge(aarch64::Pointer64, 0, Target, 0);
303     return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
304   }
305 
306 private:
307   Section &getGOTSection(LinkGraph &G) {
308     if (!GOTSection)
309       GOTSection =
310           &G.createSection(getSectionName(), MemProt::Read | MemProt::Exec);
311     return *GOTSection;
312   }
313 
314   ArrayRef<char> getGOTEntryBlockContent() {
315     return {reinterpret_cast<const char *>(NullGOTEntryContent),
316             sizeof(NullGOTEntryContent)};
317   }
318 
319   Section *GOTSection = nullptr;
320 };
321 
322 /// Procedure Linkage Table Builder.
323 class PLTTableManager : public TableManager<PLTTableManager> {
324 public:
325   PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}
326 
327   static StringRef getSectionName() { return "$__STUBS"; }
328 
329   bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
330     if (E.getKind() == aarch64::Branch26 && !E.getTarget().isDefined()) {
331       DEBUG_WITH_TYPE("jitlink", {
332         dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
333                << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
334                << formatv("{0:x}", E.getOffset()) << ")\n";
335       });
336       E.setTarget(getEntryForTarget(G, E.getTarget()));
337       return true;
338     }
339     return false;
340   }
341 
342   Symbol &createEntry(LinkGraph &G, Symbol &Target) {
343     auto &StubContentBlock = G.createContentBlock(
344         getStubsSection(G), getStubBlockContent(), orc::ExecutorAddr(), 1, 0);
345     // Re-use GOT entries for stub targets.
346     auto &GOTEntrySymbol = GOT.getEntryForTarget(G, Target);
347     StubContentBlock.addEdge(aarch64::LDRLiteral19, 0, GOTEntrySymbol, 0);
348     return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
349   }
350 
351 public:
352   Section &getStubsSection(LinkGraph &G) {
353     if (!StubsSection)
354       StubsSection =
355           &G.createSection(getSectionName(), MemProt::Read | MemProt::Exec);
356     return *StubsSection;
357   }
358 
359   ArrayRef<char> getStubBlockContent() {
360     return {reinterpret_cast<const char *>(StubContent), sizeof(StubContent)};
361   }
362 
363   GOTTableManager &GOT;
364   Section *StubsSection = nullptr;
365 };
366 
367 } // namespace aarch64
368 } // namespace jitlink
369 } // namespace llvm
370 
371 #endif // LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
372