//===----- x86_64.cpp - Generic JITLink x86-64 edge kinds, utilities ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing x86-64 objects.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/x86_64.h"

#define DEBUG_TYPE "jitlink"

namespace llvm {
namespace jitlink {
namespace x86_64 {

const char *getEdgeKindName(Edge::Kind K) {
  switch (K) {
  case Pointer64:
    return "Pointer64";
  case Pointer32:
    return "Pointer32";
  case Pointer32Signed:
    return "Pointer32Signed";
  case Pointer16:
    return "Pointer16";
  case Pointer8:
    return "Pointer8";
  case Delta64:
    return "Delta64";
  case Delta32:
    return "Delta32";
  case NegDelta64:
    return "NegDelta64";
  case NegDelta32:
    return "NegDelta32";
  case Delta64FromGOT:
    return "Delta64FromGOT";
  case PCRel32:
    return "PCRel32";
  case BranchPCRel32:
    return "BranchPCRel32";
  case BranchPCRel32ToPtrJumpStub:
    return "BranchPCRel32ToPtrJumpStub";
  case BranchPCRel32ToPtrJumpStubBypassable:
    return "BranchPCRel32ToPtrJumpStubBypassable";
  case RequestGOTAndTransformToDelta32:
    return "RequestGOTAndTransformToDelta32";
  case RequestGOTAndTransformToDelta64:
    return "RequestGOTAndTransformToDelta64";
  case RequestGOTAndTransformToDelta64FromGOT:
    return "RequestGOTAndTransformToDelta64FromGOT";
  case PCRel32GOTLoadREXRelaxable:
    return "PCRel32GOTLoadREXRelaxable";
  case RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable:
    return "RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable";
  case PCRel32GOTLoadRelaxable:
    return "PCRel32GOTLoadRelaxable";
  case RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
    return "RequestGOTAndTransformToPCRel32GOTLoadRelaxable";
  case PCRel32TLVPLoadREXRelaxable:
    return "PCRel32TLVPLoadREXRelaxable";
  case RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable:
    return "RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable";
  default:
    return getGenericEdgeKindName(static_cast<Edge::Kind>(K));
  }
}

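// Content for a pointer-sized block holding a null pointer (e.g. the initial
// value of a GOT entry before its fixup is applied).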
const char NullPointerContent[PointerSize] = {0x00, 0x00, 0x00, 0x00,
                                              0x00, 0x00, 0x00, 0x00};

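// Raw bytes of a pointer jump stub: "jmpq *0x0(%rip)" (FF 25 <disp32>). The
// zero displacement is expected to be fixed up to point at the stub's target
// pointer.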
const char PointerJumpStubContent[6] = {
    static_cast<char>(0xFFu), 0x25, 0x00, 0x00, 0x00, 0x00};

Error optimizeGOTAndStubAccesses(LinkGraph &G) {
  LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");

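  // Walk every edge in the graph, looking for relaxable GOT loads and
  // bypassable jump-stub branches that can be rewritten as direct accesses.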
  for (auto *B : G.blocks())
    for (auto &E : B->edges()) {
      if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable ||
          E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable) {
#ifndef NDEBUG
        bool REXPrefix = E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable;
        assert(E.getOffset() >= (REXPrefix ? 3u : 2u) &&
               "GOT edge occurs too early in block");
#endif
        auto *FixupData = reinterpret_cast<uint8_t *>(
                              const_cast<char *>(B->getContent().data())) +
                          E.getOffset();
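        // The fixup covers the instruction's 32-bit displacement field; the
        // opcode and ModRM bytes immediately precede it.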
        const uint8_t Op = FixupData[-2];
        const uint8_t ModRM = FixupData[-1];

        auto &GOTEntryBlock = E.getTarget().getBlock();
        assert(GOTEntryBlock.getSize() == G.getPointerSize() &&
               "GOT entry block should be pointer sized");
        assert(GOTEntryBlock.edges_size() == 1 &&
               "GOT entry should only have one outgoing edge");
        auto &GOTTarget = GOTEntryBlock.edges().begin()->getTarget();
        orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
        orc::ExecutorAddr EdgeAddr = B->getFixupAddress(E);
        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        bool TargetInRangeForImmU32 = isUInt<32>(TargetAddr.getValue());
        bool DisplacementInRangeForImmS32 = isInt<32>(Displacement);
        // If both the target address and the displacement are out of range,
        // there is no optimization opportunity.
        if (!(TargetInRangeForImmU32 || DisplacementInRangeForImmS32))
          continue;

        // Transform "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
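        // A sketch of the byte-level rewrite (assuming %eax as the destination
        // register, i.e. ModRM 0x05):
        //   8b 05 <disp32 to GOT entry>    mov foo@GOTPCREL(%rip), %eax
        //   8d 05 <disp32 to foo>          lea foo(%rip), %eax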
        if (Op == 0x8b && DisplacementInRangeForImmS32) {
          FixupData[-2] = 0x8d;
          E.setKind(x86_64::Delta32);
          E.setTarget(GOTTarget);
          E.setAddend(E.getAddend() - 4);
          LLVM_DEBUG({
            dbgs() << "  Replaced GOT load with LEA:\n    ";
            printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
          continue;
        }

        // Transform call/jmp instructions
        if (Op == 0xff && TargetInRangeForImmU32) {
          if (ModRM == 0x15) {
            // The ABI says we can convert "call *foo@GOTPCREL(%rip)" to
            // "nop; call foo", but lld converts it to "addr32 call foo"
            // because that keeps the result a single instruction.
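            // 0x67 is the address-size override prefix and 0xE8 is the direct
            // near call opcode.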
            FixupData[-2] = 0x67;
            FixupData[-1] = 0xe8;
            LLVM_DEBUG({
              dbgs() << "  replaced call instruction's memory operand with imm "
                        "operand:\n    ";
              printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
              dbgs() << "\n";
            });
          } else {
            // Transform "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
            assert(ModRM == 0x25 && "Invalid ModRM for call/jmp instructions");
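            // 0xE9 is the direct near jmp opcode. The instruction shrinks from
            // six bytes to five, so the freed trailing byte becomes a NOP
            // (0x90) and the fixup shifts back one byte to follow the new
            // opcode.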
            FixupData[-2] = 0xe9;
            FixupData[3] = 0x90;
            E.setOffset(E.getOffset() - 1);
            LLVM_DEBUG({
              dbgs() << "  replaced jmp instruction's memory operand with imm "
                        "operand:\n    ";
              printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
              dbgs() << "\n";
            });
          }
          E.setKind(x86_64::Pointer32);
          E.setTarget(GOTTarget);
          continue;
        }
      } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubBypassable) {
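        // The branch targets a pointer jump stub, which in turn loads its
        // destination through a GOT entry. If the ultimate target is within
        // direct-branch range, bypass both and branch to it directly.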
        auto &StubBlock = E.getTarget().getBlock();
        assert(StubBlock.getSize() == sizeof(PointerJumpStubContent) &&
               "Stub block should be stub sized");
        assert(StubBlock.edges_size() == 1 &&
               "Stub block should only have one outgoing edge");

        auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT block should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
        orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (isInt<32>(Displacement)) {
          E.setKind(x86_64::BranchPCRel32);
          E.setTarget(GOTTarget);
          LLVM_DEBUG({
            dbgs() << "  Replaced stub branch with direct branch:\n    ";
            printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      }
    }

  return Error::success();
}

} // end namespace x86_64
} // end namespace jitlink
} // end namespace llvm