1 //===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/AMDGPUFixupKinds.h"
11 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12 #include "Utils/AMDGPUBaseInfo.h"
13 #include "llvm/BinaryFormat/ELF.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCAssembler.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCFixupKindInfo.h"
18 #include "llvm/MC/MCObjectWriter.h"
19 #include "llvm/MC/MCSubtargetInfo.h"
20 #include "llvm/MC/TargetRegistry.h"
21 #include "llvm/Support/EndianStream.h"
22 #include "llvm/Support/TargetParser.h"
23 
24 using namespace llvm;
25 using namespace llvm::AMDGPU;
26 
27 namespace {
28 
29 class AMDGPUAsmBackend : public MCAsmBackend {
30 public:
31   AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}
32 
33   unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
34 
35   void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
36                   const MCValue &Target, MutableArrayRef<char> Data,
37                   uint64_t Value, bool IsResolved,
38                   const MCSubtargetInfo *STI) const override;
39   bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
40                             const MCRelaxableFragment *DF,
41                             const MCAsmLayout &Layout) const override;
42 
43   void relaxInstruction(MCInst &Inst,
44                         const MCSubtargetInfo &STI) const override;
45 
46   bool mayNeedRelaxation(const MCInst &Inst,
47                          const MCSubtargetInfo &STI) const override;
48 
49   unsigned getMinimumNopSize() const override;
50   bool writeNopData(raw_ostream &OS, uint64_t Count,
51                     const MCSubtargetInfo *STI) const override;
52 
53   Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
54   const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
55   bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
56                              const MCValue &Target) override;
57 };
58 
59 } //End anonymous namespace
60 
61 void AMDGPUAsmBackend::relaxInstruction(MCInst &Inst,
62                                         const MCSubtargetInfo &STI) const {
63   MCInst Res;
64   unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
65   Res.setOpcode(RelaxedOpcode);
66   Res.addOperand(Inst.getOperand(0));
67   Inst = std::move(Res);
68 }
69 
70 bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
71                                             uint64_t Value,
72                                             const MCRelaxableFragment *DF,
73                                             const MCAsmLayout &Layout) const {
74   // if the branch target has an offset of x3f this needs to be relaxed to
75   // add a s_nop 0 immediately after branch to effectively increment offset
76   // for hardware workaround in gfx1010
77   return (((int64_t(Value)/4)-1) == 0x3f);
78 }
79 
80 bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
81                        const MCSubtargetInfo &STI) const {
82   if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
83     return false;
84 
85   if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
86     return true;
87 
88   return false;
89 }
90 
91 static unsigned getFixupKindNumBytes(unsigned Kind) {
92   switch (Kind) {
93   case AMDGPU::fixup_si_sopp_br:
94     return 2;
95   case FK_SecRel_1:
96   case FK_Data_1:
97     return 1;
98   case FK_SecRel_2:
99   case FK_Data_2:
100     return 2;
101   case FK_SecRel_4:
102   case FK_Data_4:
103   case FK_PCRel_4:
104     return 4;
105   case FK_SecRel_8:
106   case FK_Data_8:
107     return 8;
108   default:
109     llvm_unreachable("Unknown fixup kind!");
110   }
111 }
112 
113 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
114                                  MCContext *Ctx) {
115   int64_t SignedValue = static_cast<int64_t>(Value);
116 
117   switch (Fixup.getTargetKind()) {
118   case AMDGPU::fixup_si_sopp_br: {
119     int64_t BrImm = (SignedValue - 4) / 4;
120 
121     if (Ctx && !isInt<16>(BrImm))
122       Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
123 
124     return BrImm;
125   }
126   case FK_Data_1:
127   case FK_Data_2:
128   case FK_Data_4:
129   case FK_Data_8:
130   case FK_PCRel_4:
131   case FK_SecRel_4:
132     return Value;
133   default:
134     llvm_unreachable("unhandled fixup kind");
135   }
136 }
137 
138 void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
139                                   const MCValue &Target,
140                                   MutableArrayRef<char> Data, uint64_t Value,
141                                   bool IsResolved,
142                                   const MCSubtargetInfo *STI) const {
143   if (Fixup.getKind() >= FirstLiteralRelocationKind)
144     return;
145 
146   Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
147   if (!Value)
148     return; // Doesn't change encoding.
149 
150   MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
151 
152   // Shift the value into position.
153   Value <<= Info.TargetOffset;
154 
155   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
156   uint32_t Offset = Fixup.getOffset();
157   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
158 
159   // For each byte of the fragment that the fixup touches, mask in the bits from
160   // the fixup value.
161   for (unsigned i = 0; i != NumBytes; ++i)
162     Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
163 }
164 
// Map an ELF relocation name from a ".reloc" directive (e.g. "R_AMDGPU_ABS32")
// to a literal fixup kind encoding that relocation value directly; returns
// None for unrecognized names.
Optional<MCFixupKind> AMDGPUAsmBackend::getFixupKind(StringRef Name) const {
  return StringSwitch<Optional<MCFixupKind>>(Name)
// Expand one .Case per ELF_RELOC entry in the AMDGPU relocation table.
#define ELF_RELOC(Name, Value)                                                 \
  .Case(#Name, MCFixupKind(FirstLiteralRelocationKind + Value))
#include "llvm/BinaryFormat/ELFRelocs/AMDGPU.def"
#undef ELF_RELOC
      .Default(None);
}
173 
174 const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
175                                                        MCFixupKind Kind) const {
176   const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
177     // name                   offset bits  flags
178     { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
179   };
180 
181   if (Kind >= FirstLiteralRelocationKind)
182     return MCAsmBackend::getFixupKindInfo(FK_NONE);
183 
184   if (Kind < FirstTargetFixupKind)
185     return MCAsmBackend::getFixupKindInfo(Kind);
186 
187   return Infos[Kind - FirstTargetFixupKind];
188 }
189 
190 bool AMDGPUAsmBackend::shouldForceRelocation(const MCAssembler &,
191                                              const MCFixup &Fixup,
192                                              const MCValue &) {
193   return Fixup.getKind() >= FirstLiteralRelocationKind;
194 }
195 
// Smallest NOP this backend can emit: one 32-bit s_nop instruction
// (see writeNopData).
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}
199 
200 bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
201                                     const MCSubtargetInfo *STI) const {
202   // If the count is not 4-byte aligned, we must be writing data into the text
203   // section (otherwise we have unaligned instructions, and thus have far
204   // bigger problems), so just write zeros instead.
205   OS.write_zeros(Count % 4);
206 
207   // We are properly aligned, so write NOPs as requested.
208   Count /= 4;
209 
210   // FIXME: R600 support.
211   // s_nop 0
212   const uint32_t Encoded_S_NOP_0 = 0xbf800000;
213 
214   for (uint64_t I = 0; I != Count; ++I)
215     support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);
216 
217   return true;
218 }
219 
220 //===----------------------------------------------------------------------===//
221 // ELFAMDGPUAsmBackend class
222 //===----------------------------------------------------------------------===//
223 
224 namespace {
225 
226 class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
227   bool Is64Bit;
228   bool HasRelocationAddend;
229   uint8_t OSABI = ELF::ELFOSABI_NONE;
230   uint8_t ABIVersion = 0;
231 
232 public:
233   ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
234       AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
235       HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
236       ABIVersion(ABIVersion) {
237     switch (TT.getOS()) {
238     case Triple::AMDHSA:
239       OSABI = ELF::ELFOSABI_AMDGPU_HSA;
240       break;
241     case Triple::AMDPAL:
242       OSABI = ELF::ELFOSABI_AMDGPU_PAL;
243       break;
244     case Triple::Mesa3D:
245       OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
246       break;
247     default:
248       break;
249     }
250   }
251 
252   std::unique_ptr<MCObjectTargetWriter>
253   createObjectTargetWriter() const override {
254     return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
255                                        ABIVersion);
256   }
257 };
258 
259 } // end anonymous namespace
260 
261 MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
262                                            const MCSubtargetInfo &STI,
263                                            const MCRegisterInfo &MRI,
264                                            const MCTargetOptions &Options) {
265   return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
266                                  getHsaAbiVersion(&STI).value_or(0));
267 }
268