//===- ARMTargetStreamer.cpp - ARMTargetStreamer class ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMTargetStreamer class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;

//
// ARMTargetStreamer Implementation
//

ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
    : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}

ARMTargetStreamer::~ARMTargetStreamer() = default;

// The constant pool handling is shared by all ARMTargetStreamer
// implementations.
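// For example, an `ldr r0, =expr` pseudo-instruction adds an entry via
// addConstantPoolEntry(), a subsequent `.ltorg` directive flushes the current
// section's pool via emitCurrentConstantPool(), and any pools still pending at
// the end of the stream are written out by emitConstantPools().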
const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
                                                      SMLoc Loc) {
  return ConstantPools->addEntry(Streamer, Expr, 4, Loc);
}

void ARMTargetStreamer::emitCurrentConstantPool() {
  ConstantPools->emitForCurrentSection(Streamer);
  ConstantPools->clearCacheForCurrentSection(Streamer);
}

// emitConstantPools() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::emitConstantPools() {
  ConstantPools->emitAll(Streamer);
}

// reset() - Reset any state
void ARMTargetStreamer::reset() {}

void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {
  unsigned Size;
  char Buffer[4];
  const bool LittleEndian =
      getStreamer().getContext().getAsmInfo()->isLittleEndian();

  switch (Suffix) {
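  // No suffix selects ARM mode: the instruction is emitted as a single 32-bit
  // word in the target byte order.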
  case '\0':
    Size = 4;

    for (unsigned II = 0, IE = Size; II != IE; II++) {
      const unsigned I = LittleEndian ? (Size - II - 1) : II;
      Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
    }

    break;
  case 'n':
  case 'w':
    Size = (Suffix == 'n' ? 2 : 4);

    // Thumb wide instructions are emitted as a pair of 16-bit words of the
    // appropriate endianness.
    for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
      const unsigned I0 = LittleEndian ? II + 0 : II + 1;
      const unsigned I1 = LittleEndian ? II + 1 : II + 0;
      Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
      Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
    }

    break;
  default:
    llvm_unreachable("Invalid Suffix");
  }
  getStreamer().emitBytes(StringRef(Buffer, Size));
}
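
// For example, the assembler directive `.inst.n 0xbf00` (a Thumb NOP) reaches
// this hook as emitInst(0xbf00, 'n'), while `.inst 0xe320f000` (an ARM-mode
// NOP) arrives with no suffix.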

// The remaining callbacks should be handled separately by each
// streamer.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
                                  int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
                                    bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
                                      const SmallVectorImpl<uint8_t> &Opcodes) {
}
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
                                          StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
                                             unsigned IntValue,
                                             StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(uint64_t ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(unsigned FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void ARMTargetStreamer::annotateTLSDescriptorSequence(
    const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}

void ARMTargetStreamer::emitARMWinCFIAllocStack(unsigned Size, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveRegMask(unsigned Mask, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveSP(unsigned Reg) {}
void ARMTargetStreamer::emitARMWinCFISaveFRegs(unsigned First, unsigned Last) {}
void ARMTargetStreamer::emitARMWinCFISaveLR(unsigned Offset) {}
void ARMTargetStreamer::emitARMWinCFINop(bool Wide) {}
void ARMTargetStreamer::emitARMWinCFIPrologEnd(bool Fragment) {}
void ARMTargetStreamer::emitARMWinCFIEpilogStart(unsigned Condition) {}
void ARMTargetStreamer::emitARMWinCFIEpilogEnd() {}
void ARMTargetStreamer::emitARMWinCFICustom(unsigned Opcode) {}
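
// Map the subtarget's feature bits to the Tag_CPU_arch build attribute value,
// generally testing the newest architecture versions first so that a core
// implementing several versions reports the most recent one.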
static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
  if (STI.getCPU() == "xscale")
    return ARMBuildAttrs::v5TEJ;

  if (STI.hasFeature(ARM::HasV9_0aOps))
    return ARMBuildAttrs::v9_A;
  else if (STI.hasFeature(ARM::HasV8Ops)) {
    if (STI.hasFeature(ARM::FeatureRClass))
      return ARMBuildAttrs::v8_R;
    return ARMBuildAttrs::v8_A;
  } else if (STI.hasFeature(ARM::HasV8_1MMainlineOps))
    return ARMBuildAttrs::v8_1_M_Main;
  else if (STI.hasFeature(ARM::HasV8MMainlineOps))
    return ARMBuildAttrs::v8_M_Main;
  else if (STI.hasFeature(ARM::HasV7Ops)) {
    if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
      return ARMBuildAttrs::v7E_M;
    return ARMBuildAttrs::v7;
  } else if (STI.hasFeature(ARM::HasV6T2Ops))
    return ARMBuildAttrs::v6T2;
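  // v6T2 is tested before v8-M Baseline because v8-M Baseline is a subset of
  // v6T2 (see isV8M below); a v6T2 core would otherwise be misreported as
  // v8-M Baseline.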
  else if (STI.hasFeature(ARM::HasV8MBaselineOps))
    return ARMBuildAttrs::v8_M_Base;
  else if (STI.hasFeature(ARM::HasV6MOps))
    return ARMBuildAttrs::v6S_M;
  else if (STI.hasFeature(ARM::HasV6Ops))
    return ARMBuildAttrs::v6;
  else if (STI.hasFeature(ARM::HasV5TEOps))
    return ARMBuildAttrs::v5TE;
  else if (STI.hasFeature(ARM::HasV5TOps))
    return ARMBuildAttrs::v5T;
  else if (STI.hasFeature(ARM::HasV4TOps))
    return ARMBuildAttrs::v4T;
  else
    return ARMBuildAttrs::v4;
}

static bool isV8M(const MCSubtargetInfo &STI) {
  // Note that v8M Baseline is a subset of v6T2!
  return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
          !STI.hasFeature(ARM::HasV6T2Ops)) ||
         STI.hasFeature(ARM::HasV8MMainlineOps);
}

/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI or any source-language choices.
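///
/// A rough usage sketch (assuming `OS` is the MCStreamer being written to and
/// `STI` describes the subtarget; the names here are illustrative):
///
/// \code
///   auto &ATS = static_cast<ARMTargetStreamer &>(*OS.getTargetStreamer());
///   ATS.emitTargetAttributes(STI);  // CPU name, arch, profile, FPU, ...
///   ATS.finishAttributeSection();   // finalize the attributes section
/// \endcode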
void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
  switchVendor("aeabi");

  const StringRef CPUString = STI.getCPU();
  if (!CPUString.empty() && !CPUString.startswith("generic")) {
    // FIXME: remove krait check when GNU tools support krait cpu
    if (STI.hasFeature(ARM::ProcKrait)) {
      emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
      // We consider krait as a "cortex-a9" + hwdiv CPU
      // Enable hwdiv through ".arch_extension idiv"
      if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
          STI.hasFeature(ARM::FeatureHWDivARM))
        emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
    } else {
      emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
    }
  }

  emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));

  if (STI.hasFeature(ARM::FeatureAClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::ApplicationProfile);
  } else if (STI.hasFeature(ARM::FeatureRClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::RealTimeProfile);
  } else if (STI.hasFeature(ARM::FeatureMClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::MicroControllerProfile);
  }

  emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
                                                ? ARMBuildAttrs::Not_Allowed
                                                : ARMBuildAttrs::Allowed);

  if (isV8M(STI)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumbDerived);
  } else if (STI.hasFeature(ARM::FeatureThumb2)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::AllowThumb32);
  } else if (STI.hasFeature(ARM::HasV4TOps)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
  }

  if (STI.hasFeature(ARM::FeatureNEON)) {
    /* NEON is not exactly a VFP architecture, but GAS emits one of
     * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
    if (STI.hasFeature(ARM::FeatureFPARMv8)) {
      if (STI.hasFeature(ARM::FeatureCrypto))
        emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
      else
        emitFPU(ARM::FK_NEON_FP_ARMV8);
    } else if (STI.hasFeature(ARM::FeatureVFP4))
      emitFPU(ARM::FK_NEON_VFPV4);
    else
      emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
                                               : ARM::FK_NEON);
    // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
    if (STI.hasFeature(ARM::HasV8Ops))
      emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
                    STI.hasFeature(ARM::HasV8_1aOps)
                        ? ARMBuildAttrs::AllowNeonARMv8_1a
                        : ARMBuildAttrs::AllowNeonARMv8);
  } else {
    if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP))
      // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
      // FPU, but there are two different names for it depending on the CPU.
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_FP_ARMV8
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16
                                                      : ARM::FK_FPV5_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP))
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_VFPV4
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16
                                                      : ARM::FK_FPV4_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP))
      emitFPU(
          STI.hasFeature(ARM::FeatureD32)
              // +d32
              ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
                                                  : ARM::FK_VFPV3)
              // -d32
              : (STI.hasFeature(ARM::FeatureFP64)
                     ? (STI.hasFeature(ARM::FeatureFP16)
                            ? ARM::FK_VFPV3_D16_FP16
                            : ARM::FK_VFPV3_D16)
                     : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
                                                         : ARM::FK_VFPV3XD)));
    else if (STI.hasFeature(ARM::FeatureVFP2_SP))
      emitFPU(ARM::FK_VFPV2);
  }

  // ABI_HardFP_use attribute to indicate single precision FP.
  if (STI.hasFeature(ARM::FeatureVFP2_SP) && !STI.hasFeature(ARM::FeatureFP64))
    emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
                  ARMBuildAttrs::HardFPSinglePrecision);

  if (STI.hasFeature(ARM::FeatureFP16))
    emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);

  if (STI.hasFeature(ARM::FeatureMP))
    emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);

  if (STI.hasFeature(ARM::HasMVEFloatOps))
    emitAttribute(ARMBuildAttrs::MVE_arch,
                  ARMBuildAttrs::AllowMVEIntegerAndFloat);
  else if (STI.hasFeature(ARM::HasMVEIntegerOps))
    emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEInteger);

  // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
  // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
  // It is not possible to produce DisallowDIV: if hwdiv is present in the base
  // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
  // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
  // otherwise, the default value (AllowDIVIfExists) applies.
  if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
    emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);

  if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
    emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureStrictAlign))
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Not_Allowed);
  else
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureTrustZone) &&
      STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowTZVirtualization);
  else if (STI.hasFeature(ARM::FeatureTrustZone))
    emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
  else if (STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowVirtualization);

  if (STI.hasFeature(ARM::FeaturePACBTI)) {
    emitAttribute(ARMBuildAttrs::PAC_extension, ARMBuildAttrs::AllowPAC);
    emitAttribute(ARMBuildAttrs::BTI_extension, ARMBuildAttrs::AllowBTI);
  }
}

MCTargetStreamer *
llvm::createARMObjectTargetStreamer(MCStreamer &S,
                                    const MCSubtargetInfo &STI) {
  const Triple &TT = STI.getTargetTriple();
  if (TT.isOSBinFormatELF())
    return createARMObjectTargetELFStreamer(S);
  if (TT.isOSBinFormatCOFF())
    return createARMObjectTargetWinCOFFStreamer(S);
  return new ARMTargetStreamer(S);
}
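
// This factory is the hook the TargetRegistry uses (registered via
// RegisterObjectTargetStreamer during ARM MC initialization) to attach the
// right target streamer for the output format; the plain ARMTargetStreamer
// fallback covers formats with no target-specific streamer, such as Mach-O.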