//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // End anonymous namespace

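// Rewrite a SOPP branch into the relaxed variant returned by
// getSOPPWithRelaxation(), preserving its branch-target operand. This is the
// relaxation step for the gfx1010 offset-0x3f hardware workaround described
// in fixupNeedsRelaxation() below.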
void AMDGPUAsmBackend::relaxInstruction(MCInst &Inst,
                                        const MCSubtargetInfo &STI) const {
  MCInst Res;
  unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
  Res.setOpcode(RelaxedOpcode);
  Res.addOperand(Inst.getOperand(0));
  Inst = std::move(Res);
}

bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                            uint64_t Value,
                                            const MCRelaxableFragment *DF,
                                            const MCAsmLayout &Layout) const {
  // If the branch target has an offset of 0x3f, the branch needs to be
  // relaxed to add an s_nop 0 immediately after it, effectively incrementing
  // the offset. This works around a hardware bug in gfx1010.
  return (((int64_t(Value)/4)-1) == 0x3f);
}

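// Only subtargets with the offset-0x3f hardware bug can require relaxation,
// and only for SOPP branches that have a relaxed counterpart.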
bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
    return false;

  if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
    return true;

  return false;
}

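// Number of bytes a fixup of the given kind patches in the emitted data.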
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

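// SOPP branch immediates are signed 16-bit offsets counted in 4-byte words,
// relative to the instruction that follows the branch; hence the
// (SignedValue - 4) / 4 conversion and the simm16 range check below.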
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getTargetKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
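  // Fixup kinds at or above FirstLiteralRelocationKind come from .reloc
  // directives; they are emitted as relocations by the object writer, so
  // there is nothing to patch in place here.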
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return;

  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

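// Map ELF relocation names (e.g. "R_AMDGPU_ABS32") used in .reloc directives
// onto literal relocation fixup kinds.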
std::optional<MCFixupKind>
AMDGPUAsmBackend::getFixupKind(StringRef Name) const {
  return StringSwitch<std::optional<MCFixupKind>>(Name)
#define ELF_RELOC(Name, Value)                                                 \
  .Case(#Name, MCFixupKind(FirstLiteralRelocationKind + Value))
#include "llvm/BinaryFormat/ELFRelocs/AMDGPU.def"
#undef ELF_RELOC
      .Default(std::nullopt);
}

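// Literal relocation fixups carry no encoding information of their own, so
// hand back the generic FK_NONE entry for them; standard kinds defer to the
// generic backend table.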
const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
    MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name               offset bits flags
    { "fixup_si_sopp_br", 0,     16,  MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

bool AMDGPUAsmBackend::shouldForceRelocation(const MCAssembler &,
                                             const MCFixup &Fixup,
                                             const MCValue &) {
  return Fixup.getKind() >= FirstLiteralRelocationKind;
}

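// The smallest NOP that can be emitted is a 4-byte s_nop, so padding in the
// text section is produced in 4-byte units.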
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                    const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

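// ELF-specific backend: selects the ELF OSABI from the target OS and creates
// the AMDGPU ELF object writer.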
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;
  uint8_t ABIVersion = 0;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
      ABIVersion(ABIVersion) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
                                       ABIVersion);
  }
};

} // end anonymous namespace

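// Backend factory: the HSA code-object ABI version is taken from the
// subtarget, defaulting to 0 when it is not known.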
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
                                 getHsaAbiVersion(&STI).value_or(0));
}