//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "Utils/WebAssemblyTypeUtilities.h"
#include "Utils/WebAssemblyUtilities.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    for (auto T : {MVT::externref, MVT::funcref}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we handle that with a custom lowering.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Mark the supported floating-point library function operators as legal;
    // they otherwise default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);

    // Combine int_to_fp or fp_extend of extract_vectors, and vice versa, into
    // conversion ops
    setTargetDAGCombine(ISD::SINT_TO_FP);
    setTargetDAGCombine(ISD::UINT_TO_FP);
    setTargetDAGCombine(ISD::FP_EXTEND);
    setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
    setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
    setTargetDAGCombine(ISD::FP_ROUND);
    setTargetDAGCombine(ISD::CONCAT_VECTORS);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                     MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
                    ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FRINT})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
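  // (For example, (sext_inreg x, i8) sign-extends x from bit 7, whatever the
  // full width of x happens to be.)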
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - Truncating SIMD stores and most extending loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for the return-address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}

MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
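    // (For example, an i128 shift would be lowered to a compiler-rt routine
    // such as __ashlti3, whose shift-count parameter is an i32.)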
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
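// For example, for a signed i32 result from an f32 input, the emitted code
// has roughly this shape:
//
//   BB:       Tmp0    = f32.abs InReg
//             CmpReg  = f32.lt Tmp0, 0x1p31
//             br_if (i32.eqz CmpReg), TrueMBB     ; out of range?
//   FalseMBB: FalseReg = i32.trunc_f32_s InReg    ; known not to trap
//             br DoneMBB
//   TrueMBB:  TrueReg = i32.const INT32_MIN       ; substitute value
//   DoneMBB:  OutReg  = phi [FalseReg, FalseMBB], [TrueReg, TrueMBB]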
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
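  // For example, with Int64 == false, Limit is INT32_MIN: the signed case
  // checks fabs(x) < 0x1p31 and substitutes INT32_MIN on failure, while the
  // unsigned case checks x < 0x1p32 (combined with the separate x >= 0 check
  // emitted below) and substitutes 0.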
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect = CallParams.getOperand(0).isReg();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // See if we must truncate the function pointer.
  // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
  // as 64-bit for uniformity with other pointer types.
  // See also: WebAssemblyFastISel::selectCall
  if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
    Register Reg32 =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    auto &FnPtr = CallParams.getOperand(0);
    BuildMI(*BB, CallResults.getIterator(), DL,
            TII.get(WebAssembly::I32_WRAP_I64), Reg32)
        .addReg(FnPtr.getReg());
    FnPtr.setReg(Reg32);
  }

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.RemoveOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table, and
    // the funcref is always installed in slot 0 of the table. Therefore,
    // instead of adding the function pointer at the end of the params list, we
    // add a zero (the index into __funcref_call_table).
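    // In other words, rather than emitting
    //   <args...> <funcref> call_indirect
    // we emit
    //   <args...> i32.const 0 call_indirect __funcref_call_table
    // with the funcref itself installed into slot 0 beforehand by a table.set
    // emitted during call lowering.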
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //   i32.const 0
  //   ref.null func
  //   table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref)
            .addImm(static_cast<int32_t>(WebAssembly::HeapType::Funcref));
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode interface gives us no way to determine if wrapping
  // could be happening, so we approximate this by accepting only non-negative
  // offsets.
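  // (For example, an access at (base - 8) is rejected here, so the negative
  // offset stays as explicit address arithmetic instead of being folded into
  // the load or store.)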
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // The atomic.notify instruction does not really load the memory specified
    // with this argument, but a MachineMemOperand must be either a load or a
    // store, so we set this to a load.
    // FIXME: Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatile in the backend, so we should be
    // consistent. The same applies to the wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent, target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if they
  // are not already present. These additional arguments are also added to the
  // callee signature; they are necessary to match caller and callee signatures
  // for indirect calls.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
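    // (For example, a variadic call passing an i32 and then an f64 after the
    // fixed arguments would place the i32 at offset 0 and the f64 at offset 8,
    // its ABI alignment, giving a 16-byte buffer.)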
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, since every direct
    // call is), turn it into a TargetGlobalAddress node so that
    // LowerGlobalAddress doesn't add MO_GOT, which is not needed for direct
    // calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref, we need to add a table.set
  // instruction to the chain and transform the call.
  if (CLI.CB &&
      WebAssembly::isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types: we generate a
    // table.set to install the funcref in a special table used solely for this
    // purpose, followed by a call_indirect. Here we just generate the
    // table.set, and make its SDValue the new chain so that the lowering can
    // be finalized below by generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return Subtarget->hasMultivalue() || Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments if they
  // are not already present. These additional arguments are also added to the
  // callee signature; they are necessary to match caller and callee signatures
  // for indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  }
}

IsWebAssemblyGlobal(SDValue Op)1409 static bool IsWebAssemblyGlobal(SDValue Op) {
1410 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1411 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1412
1413 return false;
1414 }
1415
IsWebAssemblyLocal(SDValue Op,SelectionDAG & DAG)1416 static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1417 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1418 if (!FI)
1419 return None;
1420
1421 auto &MF = DAG.getMachineFunction();
1422 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1423 }
1424
LowerStore(SDValue Op,SelectionDAG & DAG) const1425 SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1426 SelectionDAG &DAG) const {
1427 SDLoc DL(Op);
1428 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1429 const SDValue &Value = SN->getValue();
1430 const SDValue &Base = SN->getBasePtr();
1431 const SDValue &Offset = SN->getOffset();
1432
1433 if (IsWebAssemblyGlobal(Base)) {
1434 if (!Offset->isUndef())
1435 report_fatal_error("unexpected offset when storing to webassembly global",
1436 false);
1437
1438 SDVTList Tys = DAG.getVTList(MVT::Other);
1439 SDValue Ops[] = {SN->getChain(), Value, Base};
1440 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1441 SN->getMemoryVT(), SN->getMemOperand());
1442 }
1443
1444 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1445 if (!Offset->isUndef())
1446 report_fatal_error("unexpected offset when storing to webassembly local",
1447 false);
1448
1449 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1450 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1451 SDValue Ops[] = {SN->getChain(), Idx, Value};
1452 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1453 }
1454
1455 return Op;
1456 }
1457
LowerLoad(SDValue Op,SelectionDAG & DAG) const1458 SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1459 SelectionDAG &DAG) const {
1460 SDLoc DL(Op);
1461 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1462 const SDValue &Base = LN->getBasePtr();
1463 const SDValue &Offset = LN->getOffset();
1464
1465 if (IsWebAssemblyGlobal(Base)) {
1466 if (!Offset->isUndef())
1467 report_fatal_error(
1468 "unexpected offset when loading from webassembly global", false);
1469
1470 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1471 SDValue Ops[] = {LN->getChain(), Base};
1472 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1473 LN->getMemoryVT(), LN->getMemOperand());
1474 }
1475
1476 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1477 if (!Offset->isUndef())
1478 report_fatal_error(
1479 "unexpected offset when loading from webassembly local", false);
1480
1481 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1482 EVT LocalVT = LN->getValueType(0);
1483 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1484 {LN->getChain(), Idx});
1485 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1486 assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
1487 return Result;
1488 }
1489
1490 return Op;
1491 }
1492
LowerCopyToReg(SDValue Op,SelectionDAG & DAG) const1493 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1494 SelectionDAG &DAG) const {
1495 SDValue Src = Op.getOperand(2);
1496 if (isa<FrameIndexSDNode>(Src.getNode())) {
1497 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1498 // the FI to some LEA-like instruction, but since we don't have that, we
1499 // need to insert some kind of instruction that can take an FI operand and
1500 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1501 // local.copy between Op and its FI operand.
1502 SDValue Chain = Op.getOperand(0);
1503 SDLoc DL(Op);
1504 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1505 EVT VT = Src.getValueType();
1506 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1507 : WebAssembly::COPY_I64,
1508 DL, VT, Src),
1509 0);
1510 return Op.getNode()->getNumValues() == 1
1511 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1512 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1513 Op.getNumOperands() == 4 ? Op.getOperand(3)
1514 : SDValue());
1515 }
1516 return SDValue();
1517 }
1518
LowerFrameIndex(SDValue Op,SelectionDAG & DAG) const1519 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1520 SelectionDAG &DAG) const {
1521 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1522 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1523 }
1524
LowerRETURNADDR(SDValue Op,SelectionDAG & DAG) const1525 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1526 SelectionDAG &DAG) const {
1527 SDLoc DL(Op);
1528
1529 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1530 fail(DL, DAG,
1531 "Non-Emscripten WebAssembly hasn't implemented "
1532 "__builtin_return_address");
1533 return SDValue();
1534 }
1535
1536 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1537 return SDValue();
1538
1539 unsigned Depth = Op.getConstantOperandVal(0);
1540 MakeLibCallOptions CallOptions;
1541 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1542 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1543 .first;
1544 }
1545
LowerFRAMEADDR(SDValue Op,SelectionDAG & DAG) const1546 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1547 SelectionDAG &DAG) const {
1548 // Non-zero depths are not supported by WebAssembly currently. Use the
1549 // legalizer's default expansion, which is to return 0 (what this function is
1550 // documented to do).
1551 if (Op.getConstantOperandVal(0) > 0)
1552 return SDValue();
1553
1554 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1555 EVT VT = Op.getValueType();
1556 Register FP =
1557 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1558 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1559 }
1560
1561 SDValue
LowerGlobalTLSAddress(SDValue Op,SelectionDAG & DAG) const1562 WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1563 SelectionDAG &DAG) const {
1564 SDLoc DL(Op);
1565 const auto *GA = cast<GlobalAddressSDNode>(Op);
1566
1567 MachineFunction &MF = DAG.getMachineFunction();
1568 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1569 report_fatal_error("cannot use thread-local storage without bulk memory",
1570 false);
1571
1572 const GlobalValue *GV = GA->getGlobal();
1573
1574 // Currently Emscripten does not support dynamic linking with threads.
1575 // Therefore, if we have thread-local storage, only the local-exec model
1576 // is possible.
1577 // TODO: remove this and implement proper TLS models once Emscripten
1578 // supports dynamic linking with threads.
1579 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1580 !Subtarget->getTargetTriple().isOSEmscripten()) {
1581 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1582 "non-Emscripten OSes: variable " +
1583 GV->getName(),
1584 false);
1585 }
1586
1587 auto model = GV->getThreadLocalMode();
1588
1589 // Unsupported TLS modes
1590 assert(model != GlobalValue::NotThreadLocal);
1591 assert(model != GlobalValue::InitialExecTLSModel);
1592
1593 if (model == GlobalValue::LocalExecTLSModel ||
1594 model == GlobalValue::LocalDynamicTLSModel ||
1595 (model == GlobalValue::GeneralDynamicTLSModel &&
1596 getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))) {
1597 // For DSO-local TLS variables we use offset from __tls_base
1598
1599 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1600 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1601 : WebAssembly::GLOBAL_GET_I32;
1602 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1603
1604 SDValue BaseAddr(
1605 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1606 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1607 0);
1608
1609 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1610 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1611 SDValue SymOffset =
1612 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
1613
1614 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
1615 }
1616
1617 assert(model == GlobalValue::GeneralDynamicTLSModel);
1618
1619 EVT VT = Op.getValueType();
1620 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1621 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1622 GA->getOffset(),
1623 WebAssemblyII::MO_GOT_TLS));
1624 }
1625
LowerGlobalAddress(SDValue Op,SelectionDAG & DAG) const1626 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1627 SelectionDAG &DAG) const {
1628 SDLoc DL(Op);
1629 const auto *GA = cast<GlobalAddressSDNode>(Op);
1630 EVT VT = Op.getValueType();
1631 assert(GA->getTargetFlags() == 0 &&
1632 "Unexpected target flags on generic GlobalAddressSDNode");
1633 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1634 fail(DL, DAG, "Invalid address space for WebAssembly target");
1635
1636 unsigned OperandFlags = 0;
1637 if (isPositionIndependent()) {
1638 const GlobalValue *GV = GA->getGlobal();
1639 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1640 MachineFunction &MF = DAG.getMachineFunction();
1641 MVT PtrVT = getPointerTy(MF.getDataLayout());
1642 const char *BaseName;
1643 if (GV->getValueType()->isFunctionTy()) {
1644 BaseName = MF.createExternalSymbolName("__table_base");
1645 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1646 }
1647 else {
1648 BaseName = MF.createExternalSymbolName("__memory_base");
1649 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1650 }
1651 SDValue BaseAddr =
1652 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1653 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1654
1655 SDValue SymAddr = DAG.getNode(
1656 WebAssemblyISD::WrapperREL, DL, VT,
1657 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1658 OperandFlags));
1659
1660 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1661 }
1662 OperandFlags = WebAssemblyII::MO_GOT;
1663 }
1664
1665 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1666 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1667 GA->getOffset(), OperandFlags));
1668 }
1669
1670 SDValue
LowerExternalSymbol(SDValue Op,SelectionDAG & DAG) const1671 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1672 SelectionDAG &DAG) const {
1673 SDLoc DL(Op);
1674 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1675 EVT VT = Op.getValueType();
1676 assert(ES->getTargetFlags() == 0 &&
1677 "Unexpected target flags on generic ExternalSymbolSDNode");
1678 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1679 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1680 }
1681
LowerJumpTable(SDValue Op,SelectionDAG & DAG) const1682 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1683 SelectionDAG &DAG) const {
1684 // There's no need for a Wrapper node because we always incorporate a jump
1685 // table operand into a BR_TABLE instruction, rather than ever
1686 // materializing it in a register.
1687 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1688 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1689 JT->getTargetFlags());
1690 }
1691
LowerBR_JT(SDValue Op,SelectionDAG & DAG) const1692 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1693 SelectionDAG &DAG) const {
1694 SDLoc DL(Op);
1695 SDValue Chain = Op.getOperand(0);
1696 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1697 SDValue Index = Op.getOperand(2);
1698 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1699
1700 SmallVector<SDValue, 8> Ops;
1701 Ops.push_back(Chain);
1702 Ops.push_back(Index);
1703
1704 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1705 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1706
1707 // Add an operand for each case.
1708 for (auto MBB : MBBs)
1709 Ops.push_back(DAG.getBasicBlock(MBB));
1710
1711 // Add the first MBB as a dummy default target for now. This will be replaced
1712 // with the proper default target (and the preceding range check eliminated)
1713 // if possible by WebAssemblyFixBrTableDefaults.
1714 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1715 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1716 }
1717
LowerVASTART(SDValue Op,SelectionDAG & DAG) const1718 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1719 SelectionDAG &DAG) const {
1720 SDLoc DL(Op);
1721 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1722
1723 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1724 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1725
1726 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1727 MFI->getVarargBufferVreg(), PtrVT);
1728 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1729 MachinePointerInfo(SV));
1730 }
1731
LowerIntrinsic(SDValue Op,SelectionDAG & DAG) const1732 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1733 SelectionDAG &DAG) const {
1734 MachineFunction &MF = DAG.getMachineFunction();
1735 unsigned IntNo;
1736 switch (Op.getOpcode()) {
1737 case ISD::INTRINSIC_VOID:
1738 case ISD::INTRINSIC_W_CHAIN:
1739 IntNo = Op.getConstantOperandVal(1);
1740 break;
1741 case ISD::INTRINSIC_WO_CHAIN:
1742 IntNo = Op.getConstantOperandVal(0);
1743 break;
1744 default:
1745 llvm_unreachable("Invalid intrinsic");
1746 }
1747 SDLoc DL(Op);
1748
1749 switch (IntNo) {
1750 default:
1751 return SDValue(); // Don't custom lower most intrinsics.
1752
1753 case Intrinsic::wasm_lsda: {
1754 EVT VT = Op.getValueType();
1755 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1756 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1757 auto &Context = MF.getMMI().getContext();
1758 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1759 Twine(MF.getFunctionNumber()));
1760 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1761 DAG.getMCSymbol(S, PtrVT));
1762 }
1763
1764 case Intrinsic::wasm_shuffle: {
1765 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1766 SDValue Ops[18];
1767 size_t OpIdx = 0;
1768 Ops[OpIdx++] = Op.getOperand(1);
1769 Ops[OpIdx++] = Op.getOperand(2);
1770 while (OpIdx < 18) {
1771 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1772 if (MaskIdx.isUndef() ||
1773 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1774 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1775 } else {
1776 Ops[OpIdx++] = MaskIdx;
1777 }
1778 }
1779 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1780 }
1781 }
1782 }
1783
1784 SDValue
LowerSIGN_EXTEND_INREG(SDValue Op,SelectionDAG & DAG) const1785 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1786 SelectionDAG &DAG) const {
1787 SDLoc DL(Op);
1788 // If sign extension operations are disabled, allow sext_inreg only if operand
1789 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1790 // extension operations, but allowing sext_inreg in this context lets us have
1791 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1792 // everywhere would be simpler in this file, but would necessitate large and
1793 // brittle patterns to undo the expansion and select extract_lane_s
1794 // instructions.
1795 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1796 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1797 return SDValue();
1798
1799 const SDValue &Extract = Op.getOperand(0);
1800 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1801 if (VecT.getVectorElementType().getSizeInBits() > 32)
1802 return SDValue();
1803 MVT ExtractedLaneT =
1804 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1805 MVT ExtractedVecT =
1806 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1807 if (ExtractedVecT == VecT)
1808 return Op;
1809
1810 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1811 const SDNode *Index = Extract.getOperand(1).getNode();
1812 if (!isa<ConstantSDNode>(Index))
1813 return SDValue();
1814 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1815 unsigned Scale =
1816 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1817 assert(Scale > 1);
1818 SDValue NewIndex =
1819 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1820 SDValue NewExtract = DAG.getNode(
1821 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1822 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1823 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1824 Op.getOperand(1));
1825 }
1826
LowerConvertLow(SDValue Op,SelectionDAG & DAG)1827 static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
1828 SDLoc DL(Op);
1829 if (Op.getValueType() != MVT::v2f64)
1830 return SDValue();
1831
1832 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
1833 unsigned &Index) -> bool {
1834 switch (Op.getOpcode()) {
1835 case ISD::SINT_TO_FP:
1836 Opcode = WebAssemblyISD::CONVERT_LOW_S;
1837 break;
1838 case ISD::UINT_TO_FP:
1839 Opcode = WebAssemblyISD::CONVERT_LOW_U;
1840 break;
1841 case ISD::FP_EXTEND:
1842 Opcode = WebAssemblyISD::PROMOTE_LOW;
1843 break;
1844 default:
1845 return false;
1846 }
1847
1848 auto ExtractVector = Op.getOperand(0);
1849 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1850 return false;
1851
1852 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
1853 return false;
1854
1855 SrcVec = ExtractVector.getOperand(0);
1856 Index = ExtractVector.getConstantOperandVal(1);
1857 return true;
1858 };
1859
1860 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
1861 SDValue LHSSrcVec, RHSSrcVec;
1862 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
1863 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
1864 return SDValue();
1865
1866 if (LHSOpcode != RHSOpcode)
1867 return SDValue();
1868
1869 MVT ExpectedSrcVT;
1870 switch (LHSOpcode) {
1871 case WebAssemblyISD::CONVERT_LOW_S:
1872 case WebAssemblyISD::CONVERT_LOW_U:
1873 ExpectedSrcVT = MVT::v4i32;
1874 break;
1875 case WebAssemblyISD::PROMOTE_LOW:
1876 ExpectedSrcVT = MVT::v4f32;
1877 break;
1878 }
1879 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
1880 return SDValue();
1881
1882 auto Src = LHSSrcVec;
1883 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
1884 // Shuffle the source vector so that the converted lanes are the low lanes.
1885 Src = DAG.getVectorShuffle(
1886 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
1887 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
1888 }
1889 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
1890 }
1891
LowerBUILD_VECTOR(SDValue Op,SelectionDAG & DAG) const1892 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1893 SelectionDAG &DAG) const {
1894 if (auto ConvertLow = LowerConvertLow(Op, DAG))
1895 return ConvertLow;
1896
1897 SDLoc DL(Op);
1898 const EVT VecT = Op.getValueType();
1899 const EVT LaneT = Op.getOperand(0).getValueType();
1900 const size_t Lanes = Op.getNumOperands();
1901 bool CanSwizzle = VecT == MVT::v16i8;
1902
1903 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1904 // possible number of lanes at once followed by a sequence of replace_lane
1905 // instructions to individually initialize any remaining lanes.
1906
1907 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1908 // swizzled lanes should be given greater weight.
1909
1910 // TODO: Investigate looping rather than always extracting/replacing specific
1911 // lanes to fill gaps.
1912
1913 auto IsConstant = [](const SDValue &V) {
1914 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1915 };
1916
1917 // Returns the source vector and index vector pair if they exist. Checks for:
1918 // (extract_vector_elt
1919 // $src,
1920 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1921 // )
1922 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1923 auto Bail = std::make_pair(SDValue(), SDValue());
1924 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1925 return Bail;
1926 const SDValue &SwizzleSrc = Lane->getOperand(0);
1927 const SDValue &IndexExt = Lane->getOperand(1);
1928 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1929 return Bail;
1930 const SDValue &Index = IndexExt->getOperand(0);
1931 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1932 return Bail;
1933 const SDValue &SwizzleIndices = Index->getOperand(0);
1934 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1935 SwizzleIndices.getValueType() != MVT::v16i8 ||
1936 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1937 Index->getConstantOperandVal(1) != I)
1938 return Bail;
1939 return std::make_pair(SwizzleSrc, SwizzleIndices);
1940 };
1941
1942 // If the lane is extracted from another vector at a constant index, return
1943 // that vector. The source vector must not have more lanes than the dest
1944 // because the shufflevector indices are in terms of the destination lanes and
1945 // would not be able to address the smaller individual source lanes.
1946 auto GetShuffleSrc = [&](const SDValue &Lane) {
1947 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1948 return SDValue();
1949 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1950 return SDValue();
1951 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1952 VecT.getVectorNumElements())
1953 return SDValue();
1954 return Lane->getOperand(0);
1955 };
1956
1957 using ValueEntry = std::pair<SDValue, size_t>;
1958 SmallVector<ValueEntry, 16> SplatValueCounts;
1959
1960 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1961 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1962
1963 using ShuffleEntry = std::pair<SDValue, size_t>;
1964 SmallVector<ShuffleEntry, 16> ShuffleCounts;
1965
1966 auto AddCount = [](auto &Counts, const auto &Val) {
1967 auto CountIt =
1968 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1969 if (CountIt == Counts.end()) {
1970 Counts.emplace_back(Val, 1);
1971 } else {
1972 CountIt->second++;
1973 }
1974 };
1975
1976 auto GetMostCommon = [](auto &Counts) {
1977 auto CommonIt =
1978 std::max_element(Counts.begin(), Counts.end(),
1979 [](auto A, auto B) { return A.second < B.second; });
1980 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
1981 return *CommonIt;
1982 };
1983
1984 size_t NumConstantLanes = 0;
1985
1986 // Count eligible lanes for each type of vector creation op
1987 for (size_t I = 0; I < Lanes; ++I) {
1988 const SDValue &Lane = Op->getOperand(I);
1989 if (Lane.isUndef())
1990 continue;
1991
1992 AddCount(SplatValueCounts, Lane);
1993
1994 if (IsConstant(Lane))
1995 NumConstantLanes++;
1996 if (auto ShuffleSrc = GetShuffleSrc(Lane))
1997 AddCount(ShuffleCounts, ShuffleSrc);
1998 if (CanSwizzle) {
1999 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2000 if (SwizzleSrcs.first)
2001 AddCount(SwizzleCounts, SwizzleSrcs);
2002 }
2003 }
2004
2005 SDValue SplatValue;
2006 size_t NumSplatLanes;
2007 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2008
2009 SDValue SwizzleSrc;
2010 SDValue SwizzleIndices;
2011 size_t NumSwizzleLanes = 0;
2012 if (SwizzleCounts.size())
2013 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2014 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2015
2016 // Shuffles can draw from up to two vectors, so find the two most common
2017 // sources.
2018 SDValue ShuffleSrc1, ShuffleSrc2;
2019 size_t NumShuffleLanes = 0;
2020 if (ShuffleCounts.size()) {
2021 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2022 ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
2023 ShuffleCounts.end(),
2024 [&](const auto &Pair) {
2025 return Pair.first == ShuffleSrc1;
2026 }),
2027 ShuffleCounts.end());
2028 }
2029 if (ShuffleCounts.size()) {
2030 size_t AdditionalShuffleLanes;
2031 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2032 GetMostCommon(ShuffleCounts);
2033 NumShuffleLanes += AdditionalShuffleLanes;
2034 }
2035
2036 // Predicate returning true if the lane is properly initialized by the
2037 // original instruction
2038 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2039 SDValue Result;
2040 // Prefer swizzles over shuffles over vector consts over splats
2041 if (NumSwizzleLanes >= NumShuffleLanes &&
2042 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2043 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2044 SwizzleIndices);
2045 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2046 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2047 return Swizzled == GetSwizzleSrcs(I, Lane);
2048 };
2049 } else if (NumShuffleLanes >= NumConstantLanes &&
2050 NumShuffleLanes >= NumSplatLanes) {
2051 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2052 size_t DestLaneCount = VecT.getVectorNumElements();
2053 size_t Scale1 = 1;
2054 size_t Scale2 = 1;
2055 SDValue Src1 = ShuffleSrc1;
2056 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2057 if (Src1.getValueType() != VecT) {
2058 size_t LaneSize =
2059 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2060 assert(LaneSize > DestLaneSize);
2061 Scale1 = LaneSize / DestLaneSize;
2062 Src1 = DAG.getBitcast(VecT, Src1);
2063 }
2064 if (Src2.getValueType() != VecT) {
2065 size_t LaneSize =
2066 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2067 assert(LaneSize > DestLaneSize);
2068 Scale2 = LaneSize / DestLaneSize;
2069 Src2 = DAG.getBitcast(VecT, Src2);
2070 }
2071
2072 int Mask[16];
2073 assert(DestLaneCount <= 16);
2074 for (size_t I = 0; I < DestLaneCount; ++I) {
2075 const SDValue &Lane = Op->getOperand(I);
2076 SDValue Src = GetShuffleSrc(Lane);
2077 if (Src == ShuffleSrc1) {
2078 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2079 } else if (Src && Src == ShuffleSrc2) {
2080 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2081 } else {
2082 Mask[I] = -1;
2083 }
2084 }
2085 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2086 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2087 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2088 auto Src = GetShuffleSrc(Lane);
2089 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2090 };
2091 } else if (NumConstantLanes >= NumSplatLanes) {
2092 SmallVector<SDValue, 16> ConstLanes;
2093 for (const SDValue &Lane : Op->op_values()) {
2094 if (IsConstant(Lane)) {
2095 // Values may need to be fixed so that they will sign extend to be
2096 // within the expected range during ISel. Check whether the value is in
2097 // bounds based on the lane bit width and if it is out of bounds, lop
2098 // off the extra bits and subtract 2^n to reflect giving the high bit
2099 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
2100 // cannot possibly be out of range.
2101 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
2102 int64_t Val = Const ? Const->getSExtValue() : 0;
2103 uint64_t LaneBits = 128 / Lanes;
2104 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2105 "Unexpected out of bounds negative value");
2106 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2107 auto NewVal = ((uint64_t)Val % (1ll << LaneBits)) - (1ll << LaneBits);
2108 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
2109 } else {
2110 ConstLanes.push_back(Lane);
2111 }
2112 } else if (LaneT.isFloatingPoint()) {
2113 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2114 } else {
2115 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2116 }
2117 }
2118 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2119 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2120 return IsConstant(Lane);
2121 };
2122 } else {
2123 // Use a splat, but possibly a load_splat
2124 LoadSDNode *SplattedLoad;
2125 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
2126 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
2127 Result = DAG.getMemIntrinsicNode(
2128 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
2129 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
2130 SplattedLoad->getOffset()},
2131 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
2132 } else {
2133 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2134 }
2135 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2136 return Lane == SplatValue;
2137 };
2138 }
2139
2140 assert(Result);
2141 assert(IsLaneConstructed);
2142
2143 // Add replace_lane instructions for any unhandled values
2144 for (size_t I = 0; I < Lanes; ++I) {
2145 const SDValue &Lane = Op->getOperand(I);
2146 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2147 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2148 DAG.getConstant(I, DL, MVT::i32));
2149 }
2150
2151 return Result;
2152 }
2153
2154 SDValue
LowerVECTOR_SHUFFLE(SDValue Op,SelectionDAG & DAG) const2155 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2156 SelectionDAG &DAG) const {
2157 SDLoc DL(Op);
2158 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2159 MVT VecType = Op.getOperand(0).getSimpleValueType();
2160 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2161 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2162
2163 // Space for two vector args and sixteen mask indices
2164 SDValue Ops[18];
2165 size_t OpIdx = 0;
2166 Ops[OpIdx++] = Op.getOperand(0);
2167 Ops[OpIdx++] = Op.getOperand(1);
2168
2169 // Expand mask indices to byte indices and materialize them as operands
2170 for (int M : Mask) {
2171 for (size_t J = 0; J < LaneBytes; ++J) {
2172 // Lower undefs (represented by -1 in mask) to zero
2173 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
2174 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2175 }
2176 }
2177
2178 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2179 }
2180
LowerSETCC(SDValue Op,SelectionDAG & DAG) const2181 SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2182 SelectionDAG &DAG) const {
2183 SDLoc DL(Op);
2184 // The legalizer does not know how to expand the unsupported comparison modes
2185 // of i64x2 vectors, so we manually unroll them here.
2186 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2187 SmallVector<SDValue, 2> LHS, RHS;
2188 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2189 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2190 const SDValue &CC = Op->getOperand(2);
2191 auto MakeLane = [&](unsigned I) {
2192 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2193 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2194 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2195 };
2196 return DAG.getBuildVector(Op->getValueType(0), DL,
2197 {MakeLane(0), MakeLane(1)});
2198 }
2199
2200 SDValue
LowerAccessVectorElement(SDValue Op,SelectionDAG & DAG) const2201 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2202 SelectionDAG &DAG) const {
2203 // Allow constant lane indices, expand variable lane indices
2204 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2205 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
2206 return Op;
2207 else
2208 // Perform default expansion
2209 return SDValue();
2210 }
2211
unrollVectorShift(SDValue Op,SelectionDAG & DAG)2212 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2213 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2214 // 32-bit and 64-bit unrolled shifts will have proper semantics
2215 if (LaneT.bitsGE(MVT::i32))
2216 return DAG.UnrollVectorOp(Op.getNode());
2217 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2218 SDLoc DL(Op);
2219 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2220 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2221 unsigned ShiftOpcode = Op.getOpcode();
2222 SmallVector<SDValue, 16> ShiftedElements;
2223 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2224 SmallVector<SDValue, 16> ShiftElements;
2225 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2226 SmallVector<SDValue, 16> UnrolledOps;
2227 for (size_t i = 0; i < NumLanes; ++i) {
2228 SDValue MaskedShiftValue =
2229 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2230 SDValue ShiftedValue = ShiftedElements[i];
2231 if (ShiftOpcode == ISD::SRA)
2232 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2233 ShiftedValue, DAG.getValueType(LaneT));
2234 UnrolledOps.push_back(
2235 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2236 }
2237 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2238 }
2239
LowerShift(SDValue Op,SelectionDAG & DAG) const2240 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2241 SelectionDAG &DAG) const {
2242 SDLoc DL(Op);
2243
2244 // Only manually lower vector shifts
2245 assert(Op.getSimpleValueType().isVector());
2246
2247 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2248 if (!ShiftVal)
2249 return unrollVectorShift(Op, DAG);
2250
2251 // Use anyext because none of the high bits can affect the shift
2252 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2253
2254 unsigned Opcode;
2255 switch (Op.getOpcode()) {
2256 case ISD::SHL:
2257 Opcode = WebAssemblyISD::VEC_SHL;
2258 break;
2259 case ISD::SRA:
2260 Opcode = WebAssemblyISD::VEC_SHR_S;
2261 break;
2262 case ISD::SRL:
2263 Opcode = WebAssemblyISD::VEC_SHR_U;
2264 break;
2265 default:
2266 llvm_unreachable("unexpected opcode");
2267 }
2268
2269 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2270 }
2271
LowerFP_TO_INT_SAT(SDValue Op,SelectionDAG & DAG) const2272 SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2273 SelectionDAG &DAG) const {
2274 SDLoc DL(Op);
2275 EVT ResT = Op.getValueType();
2276 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2277
2278 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2279 (SatVT == MVT::i32 || SatVT == MVT::i64))
2280 return Op;
2281
2282 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2283 return Op;
2284
2285 return SDValue();
2286 }
2287
2288 //===----------------------------------------------------------------------===//
2289 // Custom DAG combine hooks
2290 //===----------------------------------------------------------------------===//
2291 static SDValue
performVECTOR_SHUFFLECombine(SDNode * N,TargetLowering::DAGCombinerInfo & DCI)2292 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2293 auto &DAG = DCI.DAG;
2294 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2295
2296 // Hoist vector bitcasts that don't change the number of lanes out of unary
2297 // shuffles, where they are less likely to get in the way of other combines.
2298 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2299 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2300 SDValue Bitcast = N->getOperand(0);
2301 if (Bitcast.getOpcode() != ISD::BITCAST)
2302 return SDValue();
2303 if (!N->getOperand(1).isUndef())
2304 return SDValue();
2305 SDValue CastOp = Bitcast.getOperand(0);
2306 MVT SrcType = CastOp.getSimpleValueType();
2307 MVT DstType = Bitcast.getSimpleValueType();
2308 if (!SrcType.is128BitVector() ||
2309 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2310 return SDValue();
2311 SDValue NewShuffle = DAG.getVectorShuffle(
2312 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2313 return DAG.getBitcast(DstType, NewShuffle);
2314 }
2315
2316 static SDValue
performVectorExtendCombine(SDNode * N,TargetLowering::DAGCombinerInfo & DCI)2317 performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2318 auto &DAG = DCI.DAG;
2319 assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2320 N->getOpcode() == ISD::ZERO_EXTEND);
2321
2322 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2323 // possible before the extract_subvector can be expanded.
2324 auto Extract = N->getOperand(0);
2325 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2326 return SDValue();
2327 auto Source = Extract.getOperand(0);
2328 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2329 if (IndexNode == nullptr)
2330 return SDValue();
2331 auto Index = IndexNode->getZExtValue();
2332
2333 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2334 // extracted subvector is the low or high half of its source.
2335 EVT ResVT = N->getValueType(0);
2336 if (ResVT == MVT::v8i16) {
2337 if (Extract.getValueType() != MVT::v8i8 ||
2338 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2339 return SDValue();
2340 } else if (ResVT == MVT::v4i32) {
2341 if (Extract.getValueType() != MVT::v4i16 ||
2342 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2343 return SDValue();
2344 } else if (ResVT == MVT::v2i64) {
2345 if (Extract.getValueType() != MVT::v2i32 ||
2346 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2347 return SDValue();
2348 } else {
2349 return SDValue();
2350 }
2351
2352 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2353 bool IsLow = Index == 0;
2354
2355 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2356 : WebAssemblyISD::EXTEND_HIGH_S)
2357 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2358 : WebAssemblyISD::EXTEND_HIGH_U);
2359
2360 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2361 }
2362
2363 static SDValue
performVectorTruncZeroCombine(SDNode * N,TargetLowering::DAGCombinerInfo & DCI)2364 performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2365 auto &DAG = DCI.DAG;
2366
2367 auto GetWasmConversionOp = [](unsigned Op) {
2368 switch (Op) {
2369 case ISD::FP_TO_SINT_SAT:
2370 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2371 case ISD::FP_TO_UINT_SAT:
2372 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2373 case ISD::FP_ROUND:
2374 return WebAssemblyISD::DEMOTE_ZERO;
2375 }
2376 llvm_unreachable("unexpected op");
2377 };
2378
2379 auto IsZeroSplat = [](SDValue SplatVal) {
2380 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2381 APInt SplatValue, SplatUndef;
2382 unsigned SplatBitSize;
2383 bool HasAnyUndefs;
2384 return Splat &&
2385 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2386 HasAnyUndefs) &&
2387 SplatValue == 0;
2388 };
2389
2390 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2391 // Combine this:
2392 //
2393 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2394 //
2395 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2396 //
2397 // Or this:
2398 //
2399 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2400 //
2401 // into (f32x4.demote_zero_f64x2 $x).
2402 EVT ResVT;
2403 EVT ExpectedConversionType;
2404 auto Conversion = N->getOperand(0);
2405 auto ConversionOp = Conversion.getOpcode();
2406 switch (ConversionOp) {
2407 case ISD::FP_TO_SINT_SAT:
2408 case ISD::FP_TO_UINT_SAT:
2409 ResVT = MVT::v4i32;
2410 ExpectedConversionType = MVT::v2i32;
2411 break;
2412 case ISD::FP_ROUND:
2413 ResVT = MVT::v4f32;
2414 ExpectedConversionType = MVT::v2f32;
2415 break;
2416 default:
2417 return SDValue();
2418 }
2419
2420 if (N->getValueType(0) != ResVT)
2421 return SDValue();
2422
2423 if (Conversion.getValueType() != ExpectedConversionType)
2424 return SDValue();
2425
2426 auto Source = Conversion.getOperand(0);
2427 if (Source.getValueType() != MVT::v2f64)
2428 return SDValue();
2429
2430 if (!IsZeroSplat(N->getOperand(1)) ||
2431 N->getOperand(1).getValueType() != ExpectedConversionType)
2432 return SDValue();
2433
2434 unsigned Op = GetWasmConversionOp(ConversionOp);
2435 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2436 }
2437
2438 // Combine this:
2439 //
2440 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2441 //
2442 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2443 //
2444 // Or this:
2445 //
2446 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2447 //
2448 // into (f32x4.demote_zero_f64x2 $x).
2449 EVT ResVT;
2450 auto ConversionOp = N->getOpcode();
2451 switch (ConversionOp) {
2452 case ISD::FP_TO_SINT_SAT:
2453 case ISD::FP_TO_UINT_SAT:
2454 ResVT = MVT::v4i32;
2455 break;
2456 case ISD::FP_ROUND:
2457 ResVT = MVT::v4f32;
2458 break;
2459 default:
2460 llvm_unreachable("unexpected op");
2461 }
2462
2463 if (N->getValueType(0) != ResVT)
2464 return SDValue();
2465
2466 auto Concat = N->getOperand(0);
2467 if (Concat.getValueType() != MVT::v4f64)
2468 return SDValue();
2469
2470 auto Source = Concat.getOperand(0);
2471 if (Source.getValueType() != MVT::v2f64)
2472 return SDValue();
2473
2474 if (!IsZeroSplat(Concat.getOperand(1)) ||
2475 Concat.getOperand(1).getValueType() != MVT::v2f64)
2476 return SDValue();
2477
2478 unsigned Op = GetWasmConversionOp(ConversionOp);
2479 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2480 }
2481
2482 SDValue
PerformDAGCombine(SDNode * N,DAGCombinerInfo & DCI) const2483 WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2484 DAGCombinerInfo &DCI) const {
2485 switch (N->getOpcode()) {
2486 default:
2487 return SDValue();
2488 case ISD::VECTOR_SHUFFLE:
2489 return performVECTOR_SHUFFLECombine(N, DCI);
2490 case ISD::SIGN_EXTEND:
2491 case ISD::ZERO_EXTEND:
2492 return performVectorExtendCombine(N, DCI);
2493 case ISD::FP_TO_SINT_SAT:
2494 case ISD::FP_TO_UINT_SAT:
2495 case ISD::FP_ROUND:
2496 case ISD::CONCAT_VECTORS:
2497 return performVectorTruncZeroCombine(N, DCI);
2498 }
2499 }
2500