//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors, where a true lane is all ones (-1).
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // WebAssembly does not produce floating-point exceptions on normal floating
  // point operations.
  setHasFloatingPointExceptions(false);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    if (Subtarget->hasUnimplementedSIMD128()) {
      addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
      addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
    }
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);
  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we handle it with custom lowering.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Mark the floating-point library function operators that we do support
    // as legal; they would otherwise default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Support saturating add for i8x16 and i16x8.
  if (Subtarget->hasSIMD128())
    for (auto T : {MVT::v16i8, MVT::v8i16})
      for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
        setOperationAction(Op, T, Legal);

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64}) {
      setOperationAction(Op, T, Expand);
    }
    if (Subtarget->hasSIMD128()) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
        setOperationAction(Op, T, Expand);
      }
      if (Subtarget->hasUnimplementedSIMD128()) {
        setOperationAction(Op, MVT::v2i64, Expand);
      }
    }
  }

  // There is no i64x2.mul instruction.
  setOperationAction(ISD::MUL, MVT::v2i64, Expand);

  // We have custom shuffle lowering to expose the shuffle mask.
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) {
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
    }
    if (Subtarget->hasUnimplementedSIMD128()) {
      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    }
  }

  // Custom lowering, since wasm shifts must have a scalar shift amount.
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
      for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
        setOperationAction(Op, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
        setOperationAction(Op, MVT::v2i64, Custom);
  }

  // There are no select instructions for vectors.
  if (Subtarget->hasSIMD128())
    for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Expand);
    }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
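  // (e.g. (sign_extend_inreg x, i8) sign-extends from bit 7 of x, whatever
  // the full width of x is)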
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract.
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - extending/truncating SIMD loads/stores.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
  }

  // Expand additional SIMD ops that V8 hasn't implemented yet.
  if (Subtarget->hasSIMD128() && !Subtarget->hasUnimplementedSIMD128()) {
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
  }

  // Custom lower lane accesses to expand out variable indices.
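  // (there is no wasm instruction for a dynamic lane index, so the default
  // expansion of such accesses goes through a stack temporary)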
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) {
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom);
    }
    if (Subtarget->hasUnimplementedSIMD128()) {
      for (auto T : {MVT::v2i64, MVT::v2f64}) {
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom);
      }
    }
  }

  // Trap lowers to wasm unreachable.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Exception handling intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these.
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
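  // Everything else (e.g. nand, min, max) has no single wasm instruction and
  // is expanded to a cmpxchg loop by the AtomicExpand pass.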
  return AtomicExpansionKind::CmpXChg;
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode * /*GA*/) const {
  // All offsets can be folded.
  return true;
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;
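  // (e.g. i64 shifts get an i64 count type here, while i128 would compute 128
  // and be clamped to 32 below)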

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
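// Conceptually, for the signed i32/f32 case, the emitted code behaves like
// this sketch (not the literal instruction sequence):
//   if (fabs(x) < 0x1p31) result = i32.trunc_s/f32(x);  // in range: convert
//   else                  result = INT32_MIN;           // substitute value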
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned OutReg = MI.getOperand(0).getReg();
  unsigned InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    unsigned SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
    llvm_unreachable("Unexpected instruction to emit with custom inserter");
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode interface gives us no way to determine if wrapping
  // could be happening, so we approximate this by accepting only non-negative
  // offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though loads and stores that
  // perform them should declare it with the p2align attribute, and there may
  // be a performance impact. We tell LLVM they're "fast" because for the
  // kinds of things that LLVM uses this for (merging adjacent stores of
  // constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  return TargetLowering::getSetCCResultType(DL, C, VT);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    // The atomic.notify instruction does not really load from the memory
    // specified by this argument, but a MachineMemOperand must be either a
    // load or a store, so we mark it as a load.
    // FIXME: Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatile in the backend, so we should be
    // consistent. The same applies to the wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 4;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool CallingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // WebAssembly doesn't currently support explicit tail calls. If they are
  // required, fail. Otherwise, just disable them.
  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
       MF.getTarget().Options.GuaranteedTailCallOpt) ||
      (CLI.CS && CLI.CS.isMustTailCall()))
    fail(DL, DAG, "WebAssembly doesn't support tail call yet");
  CLI.IsTailCall = false;

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  unsigned NumFixedArgs = 0;
  for (unsigned i = 0; i < Outs.size(); ++i) {
    const ISD::OutputArg &Out = Outs[i];
    SDValue &OutVal = OutVals[i];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
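    // (e.g. on wasm32, for a call f(fmt, 42, 3.14) where only fmt is fixed,
    // the i32 42 lands at offset 0 and the f64 3.14 at offset 8 of the
    // buffer, given the default ABI alignments)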
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Align = std::max(Out.Flags.getOrigAlign(),
                                Layout.getABITypeAlignment(Ty));
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Align);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }
  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    unsigned VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  ComputeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
                      DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs.
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
    fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
    return SDValue();
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  unsigned FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  // Set the TargetFlags to 0x1 which indicates that this is a "function"
  // symbol rather than a data symbol. We do this unconditionally even though
  // we don't know anything about the symbol other than its name, because all
  // external symbols used in target-independent SelectionDAG code are for
  // functions.
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
                                  WebAssemblyII::MO_SYMBOL_FUNCTION));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

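  // wasm's va_list is a single pointer: va_start just stores the address of
  // the vararg buffer (set up in LowerFormalArguments) into it.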
  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue
WebAssemblyTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return {}; // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return {}; // Don't custom lower most intrinsics.

  case Intrinsic::wasm_throw: {
    int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
    switch (Tag) {
    case CPP_EXCEPTION: {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
      const char *SymName = MF.createExternalSymbolName("__cpp_exception");
      SDValue SymNode =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(
                          SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT));
      return DAG.getNode(WebAssemblyISD::THROW, DL,
                         MVT::Other, // outchain type
                         {
                             Op.getOperand(0), // inchain
                             SymNode,          // exception symbol
                             Op.getOperand(3)  // thrown value
                         });
    }
    default:
      llvm_unreachable("Invalid tag!");
    }
    break;
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract. SIMD does not depend on sign extension operations, but
  // allowing sext_inreg in this context lets us have simple patterns to select
  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
  // simpler in this file, but would necessitate large and brittle patterns to
  // undo the expansion and select extract_lane_s instructions.
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT)
    return Op;
  // Otherwise expand.
  return SDValue();
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices.
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands.
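  // (e.g. lane index 1 in a v4i32 shuffle expands to byte indices 4, 5, 6, 7,
  // since wasm SIMD lanes are laid out little-endian)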
  for (size_t I = 0, Lanes = Mask.size(); I < Lanes; ++I) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero.
      uint64_t ByteIndex =
          Mask[I] == -1 ? 0 : (uint64_t)Mask[I] * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices.
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  else
    // Perform default expansion.
    return SDValue();
}

static SDValue UnrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics.
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift.
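  // (an i8 lane shifted by e.g. 9 must not use the full 32-bit shift count;
  // masking with LaneBits - 1 wraps the count modulo the lane width, matching
  // wasm's SIMD shift semantics)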
  SDLoc DL(Op);
  SDValue ShiftVal = Op.getOperand(1);
  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
  SDValue MaskedShiftVal = DAG.getNode(
      ISD::AND,                    // mask opcode
      DL, ShiftVal.getValueType(), // masked value type
      ShiftVal,                    // original shift value operand
      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
  );

  return DAG.UnrollVectorOp(
      DAG.getNode(Op.getOpcode(),        // original shift opcode
                  DL, Op.getValueType(), // original return type
                  Op.getOperand(0),      // original vector operand
                  MaskedShiftVal         // new masked shift value operand
                  )
          .getNode());
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts.
  assert(Op.getSimpleValueType().isVector());

  // Expand all vector shifts until V8 fixes its implementation.
  // TODO: remove this once V8 is fixed
  if (!Subtarget->hasUnimplementedSIMD128())
    return UnrollVectorShift(Op, DAG);

  // Unroll non-splat vector shifts.
  BuildVectorSDNode *ShiftVec;
  SDValue SplatVal;
  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
      !(SplatVal = ShiftVec->getSplatValue()))
    return UnrollVectorShift(Op, DAG);

  // All splats except i64x2 const splats are handled by patterns.
  ConstantSDNode *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
    return Op;

  // i64x2 const splats are custom lowered to avoid unnecessary wraps.
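  // (e.g. (shl (v2i64 X), (splat 2)) becomes (VEC_SHL X, (i32 2)) directly,
  // instead of first materializing the splat vector of shift amounts)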
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }
  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(Shift, DL, MVT::i32));
}

//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//
