//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "Utils/WebAssemblyTypeUtilities.h"
#include "Utils/WebAssemblyUtilities.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    for (auto T : {MVT::externref, MVT::funcref}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we custom-lower it.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }
136
137 // Expand unavailable integer operations.
138 for (auto Op :
139 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
140 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
141 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
142 for (auto T : {MVT::i32, MVT::i64})
143 setOperationAction(Op, T, Expand);
144 if (Subtarget->hasSIMD128())
145 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
146 setOperationAction(Op, T, Expand);
147 }
148
149 if (Subtarget->hasNontrappingFPToInt())
150 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
151 for (auto T : {MVT::i32, MVT::i64})
152 setOperationAction(Op, T, Custom);
153
154 // SIMD-specific configuration
155 if (Subtarget->hasSIMD128()) {
156 // Hoist bitcasts out of shuffles
157 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
158
159 // Combine extends of extract_subvectors into widening ops
160 setTargetDAGCombine(ISD::SIGN_EXTEND);
161 setTargetDAGCombine(ISD::ZERO_EXTEND);
162
163 // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
164 // conversions ops
165 setTargetDAGCombine(ISD::SINT_TO_FP);
166 setTargetDAGCombine(ISD::UINT_TO_FP);
167 setTargetDAGCombine(ISD::FP_EXTEND);
168 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
169
170 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
171 // into conversion ops
172 setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
173 setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
174 setTargetDAGCombine(ISD::FP_ROUND);
175 setTargetDAGCombine(ISD::CONCAT_VECTORS);
176
177 // Support saturating add for i8x16 and i16x8
178 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
179 for (auto T : {MVT::v16i8, MVT::v8i16})
180 setOperationAction(Op, T, Legal);
181
182 // Support integer abs
183 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
184 setOperationAction(ISD::ABS, T, Legal);
185
186 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
187 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
188 MVT::v2f64})
189 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
190
191 // We have custom shuffle lowering to expose the shuffle mask
192 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
193 MVT::v2f64})
194 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
195
196 // Custom lowering since wasm shifts must have a scalar shift amount
197 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
198 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
199 setOperationAction(Op, T, Custom);
200
201 // Custom lower lane accesses to expand out variable indices
202 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
203 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
204 MVT::v2f64})
205 setOperationAction(Op, T, Custom);
206
207 // There is no i8x16.mul instruction
208 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
209
210 // There is no vector conditional select instruction
211 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
212 MVT::v2f64})
213 setOperationAction(ISD::SELECT_CC, T, Expand);
214
215 // Expand integer operations supported for scalars but not SIMD
216 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
217 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
218 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
219 setOperationAction(Op, T, Expand);
220
221 // But we do have integer min and max operations
222 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
223 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
224 setOperationAction(Op, T, Legal);
225
226 // And we have popcnt for i8x16
227 setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
228
229 // Expand float operations supported for scalars but not SIMD
230 for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
231 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
232 for (auto T : {MVT::v4f32, MVT::v2f64})
233 setOperationAction(Op, T, Expand);
234
235 // Unsigned comparison operations are unavailable for i64x2 vectors.
236 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
237 setCondCodeAction(CC, MVT::v2i64, Custom);
238
239 // 64x2 conversions are not in the spec
240 for (auto Op :
241 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
242 for (auto T : {MVT::v2i64, MVT::v2f64})
243 setOperationAction(Op, T, Expand);
244
245 // But saturating fp_to_int converstions are
246 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
247 setOperationAction(Op, MVT::v4i32, Custom);
248 }
249
  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - Truncating SIMD stores and most extending loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    // And some truncating stores are legal as well
    setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
    setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
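  // Round the bit width of the shifted value up to the next power of two, and
  // widen widths between 2 and 7 up to 8, so that the result below is a
  // representable integer MVT.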
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
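// We do this by emitting a CFG diamond: the input is range-checked, and
// control flows either to a block performing the (trapping) conversion or to
// a block materializing a substitute value (0 for unsigned, INT_MIN for
// signed); a PHI in the join block selects the result.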
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
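  // Limit is the most negative value representable in the destination type.
  // Substitute is the result produced for out-of-range inputs, and CmpVal is
  // the exclusive bound on fabs(input) for in-range values: -(double)Limit is
  // 2^31 or 2^63, and doubling it gives 2^32 or 2^64 for the unsigned case.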
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

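  // Tmp0/Tmp1 hold the (possibly absolute-valued) input and the comparison
  // constant; CmpReg/EqzReg hold the i32 results of the range check; FalseReg
  // and TrueReg hold the converted and substitute results joined by the PHI.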
  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

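// Replace the CALL_PARAMS/CALL_RESULTS pseudo-instruction pair with a single
// real call instruction (CALL, CALL_INDIRECT, RET_CALL, or RET_CALL_INDIRECT),
// moving the results' defs and the params' uses onto it.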
static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect = CallParams.getOperand(0).isReg();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // See if we must truncate the function pointer.
  // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
  // as 64-bit for uniformity with other pointer types.
  // See also: WebAssemblyFastISel::selectCall
  if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
    Register Reg32 =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    auto &FnPtr = CallParams.getOperand(0);
    BuildMI(*BB, CallResults.getIterator(), DL,
            TII.get(WebAssembly::I32_WRAP_I64), Reg32)
        .addReg(FnPtr.getReg());
    FnPtr.setReg(Reg32);
  }

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.RemoveOperand(0);
    CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //    i32.const 0
  //    ref.null func
  //    table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref)
            .addImm(static_cast<int32_t>(WebAssembly::HeapType::Funcref));
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping.
  // isLegalAddressingMode gives us no way to determine whether wrapping could
  // be happening, so we approximate this by accepting only non-negative
  // offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // The atomic.notify instruction does not really load the memory specified
    // with this argument, but a MachineMemOperand should be either a load or
    // a store, so we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

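// Emit a diagnostic that the current function uses a construct the
// WebAssembly backend doesn't support.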
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent and target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

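// Lower an outgoing call: diagnose unsupported conventions and argument
// attributes, lay out any vararg buffer, and emit a CALL or RET_CALL node.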
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added to the callee's
  // signature; they are necessary to match callee and caller signatures for
  // indirect calls.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, since every direct
    // call is), turn it into a TargetGlobalAddress node so that
    // LowerGlobalAddress doesn't add MO_GOT, which is not needed for direct
    // calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref, we need to add a table.set
  // instruction to the chain and transform the call.
  if (CLI.CB && isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types: generate a
    // table.set to store the funcref in a special table used solely for this
    // purpose, followed by a call_indirect. Here we just generate the
    // table.set and make it the new chain; the call_indirect is generated
    // below.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(WasmAddressSpace::FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return Subtarget->hasMultivalue() || Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments if there
  // aren't any. These additional arguments are also added to the callee's
  // signature; they are necessary to match callee and caller signatures for
  // indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be
    // an illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  }
}

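// Test whether Op is a GlobalAddress in the WebAssembly global-variable
// address space (address space 1).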
static bool IsWebAssemblyGlobal(SDValue Op) {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());

  return false;
}

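// If Op is a frame index that has been assigned to a WebAssembly local rather
// than to linear-memory stack space, return the local's index.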
static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
  const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
  if (!FI)
    return None;

  auto &MF = DAG.getMachineFunction();
  return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
}

bool WebAssemblyTargetLowering::isFuncrefType(const Type *Ty) {
  return isa<PointerType>(Ty) &&
         Ty->getPointerAddressSpace() == WasmAddressSpace::FUNCREF;
}

bool WebAssemblyTargetLowering::isExternrefType(const Type *Ty) {
  return isa<PointerType>(Ty) &&
         Ty->getPointerAddressSpace() == WasmAddressSpace::EXTERNREF;
}

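// Custom-lower stores: stores to WebAssembly globals become GLOBAL_SET nodes,
// stores to frame indices assigned to wasm locals become LOCAL_SET nodes, and
// everything else is left to default lowering.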
LowerStore(SDValue Op,SelectionDAG & DAG) const1357 SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1358 SelectionDAG &DAG) const {
1359 SDLoc DL(Op);
1360 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1361 const SDValue &Value = SN->getValue();
1362 const SDValue &Base = SN->getBasePtr();
1363 const SDValue &Offset = SN->getOffset();
1364
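  // For example (a sketch), a store through a pointer into the wasm global
  // address space, such as
  //
  //   store i32 %v, ptr addrspace(1) @g
  //
  // is selected below to a GLOBAL_SET node rather than a linear-memory store.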
  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly global",
                         false);

    SDVTList Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {SN->getChain(), Value, Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
                                   SN->getMemoryVT(), SN->getMemOperand());
  }

  if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error("unexpected offset when storing to webassembly local",
                         false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
    SDValue Ops[] = {SN->getChain(), Idx, Value};
    return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
  }

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

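  // As in LowerStore above, loads from the wasm global address space become
  // GLOBAL_GET nodes and loads of stack objects that were assigned to wasm
  // locals become LOCAL_GET nodes; e.g. (a sketch)
  //
  //   %v = load i32, ptr addrspace(1) @g
  //
  // is selected to (GLOBAL_GET @g) rather than a linear-memory load.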
  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
                                   {LN->getChain(), Idx});
    SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
    assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
    return Result;
  }

  return Op;
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produce a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

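  // The requested depth is forwarded to a libcall; on Emscripten,
  // RTLIB::RETURN_ADDRESS is expected to resolve to a runtime helper
  // (e.g. emscripten_return_address) that computes the return address there.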
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  MVT PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFunction &MF = DAG.getMachineFunction();
  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  // Currently Emscripten does not support dynamic linking with threads.
  // Therefore, if we have thread-local storage, only the local-exec model
  // is possible.
  // TODO: remove this and implement proper TLS models once Emscripten
  // supports dynamic linking with threads.
  if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
      !Subtarget->getTargetTriple().isOSEmscripten()) {
    report_fatal_error("only -ftls-model=local-exec is supported for now on "
                       "non-Emscripten OSes: variable " +
                           GV->getName(),
                       false);
  }

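  // In the local-exec model the address of a TLS variable is __tls_base plus
  // a constant, module-relative offset, so the nodes built below correspond
  // roughly to this wat sketch:
  //
  //   global.get __tls_base
  //   i32.const  g@TLSREL
  //   i32.add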
  auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                     : WebAssembly::GLOBAL_GET_I32;
  const char *BaseName = MF.createExternalSymbolName("__tls_base");

  SDValue BaseAddr(
      DAG.getMachineNode(GlobalGet, DL, PtrVT,
                         DAG.getTargetExternalSymbol(BaseName, PtrVT)),
      0);

  SDValue TLSOffset = DAG.getTargetGlobalAddress(
      GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
  SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);

  return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
    fail(DL, DAG, "Invalid address space for WebAssembly target");

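  // In PIC code, DSO-local symbols are addressed relative to __memory_base
  // (for data) or __table_base (for functions), while non-local symbols go
  // through the GOT. E.g. the address of a local data symbol is computed
  // roughly as (wat sketch):
  //
  //   global.get __memory_base
  //   i32.const  g@MBREL
  //   i32.add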
  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperPIC, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    } else {
      OperandFlags = WebAssemblyII::MO_GOT;
    }
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

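  // The node built below has the shape
  //
  //   (BR_TABLE chain, index, bb0, bb1, ..., bbN, default)
  //
  // and is selected to a single br_table instruction.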
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // Add the first MBB as a dummy default target for now. This will be replaced
  // with the proper default target (and the preceding range check eliminated)
  // if possible by WebAssemblyFixBrTableDefaults.
  Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
                                      SelectionDAG &DAG) {
  // We only support C++ exceptions for now
  int Tag =
      cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
  if (Tag != WebAssembly::CPP_EXCEPTION)
    llvm_unreachable("Invalid tag: We only support C++ exceptions for now");
  auto &MF = DAG.getMachineFunction();
  const auto &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
  return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
                     DAG.getTargetExternalSymbol(SymName, PtrVT));
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::wasm_throw: {
    SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
    return DAG.getNode(WebAssemblyISD::THROW, DL,
                       MVT::Other, // outchain type
                       {
                           Op.getOperand(0), // inchain
                           SymNode,          // exception symbol
                           Op.getOperand(3)  // thrown value
                       });
  }

  case Intrinsic::wasm_catch: {
    SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
    return DAG.getNode(WebAssemblyISD::CATCH, DL,
                       {
                           MVT::i32,  // outchain type
                           MVT::Other // return value
                       },
                       {
                           Op.getOperand(0), // inchain
                           SymNode           // exception symbol
                       });
  }

  case Intrinsic::wasm_shuffle: {
    // Drop in-chain and replace undefs, but otherwise pass through unchanged
    SDValue Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = Op.getOperand(1);
    Ops[OpIdx++] = Op.getOperand(2);
    while (OpIdx < 18) {
      const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
      if (MaskIdx.isUndef() ||
          cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
        Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
      } else {
        Ops[OpIdx++] = MaskIdx;
      }
    }
    return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
  // extension operations, but allowing sext_inreg in this context lets us have
  // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
  // everywhere would be simpler in this file, but would necessitate large and
  // brittle patterns to undo the expansion and select extract_lane_s
  // instructions.
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  const SDValue &Extract = Op.getOperand(0);
  MVT VecT = Extract.getOperand(0).getSimpleValueType();
  if (VecT.getVectorElementType().getSizeInBits() > 32)
    return SDValue();
  MVT ExtractedLaneT =
      cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
  MVT ExtractedVecT =
      MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
  if (ExtractedVecT == VecT)
    return Op;

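  // For example (a sketch), sign-extending the low byte of i32 lane 1 of a
  // v4i32 vector:
  //
  //   (sext_inreg (extract_vector_elt (v4i32 $v), 1), i8)
  //
  // is rewritten below into an extract of byte lane 1 * (16 / 4) = 4 of the
  // bitcast vector:
  //
  //   (sext_inreg (extract_vector_elt (v16i8 (bitcast $v)), 4), i8)
  //
  // which a simple extract_lane_s pattern can match directly.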
  // Bitcast vector to appropriate type to ensure ISel pattern coverage
  const SDNode *Index = Extract.getOperand(1).getNode();
  if (!isa<ConstantSDNode>(Index))
    return SDValue();
  unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
  unsigned Scale =
      ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
  assert(Scale > 1);
  SDValue NewIndex =
      DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
  SDValue NewExtract = DAG.getNode(
      ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
      DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
                     Op.getOperand(1));
}

SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  bool CanSwizzle = VecT == MVT::v16i8;

  // BUILD_VECTORs are lowered to the instruction that initializes the highest
  // possible number of lanes at once followed by a sequence of replace_lane
  // instructions to individually initialize any remaining lanes.

  // TODO: Tune this. For example, lanewise swizzling is very expensive, so
  // swizzled lanes should be given greater weight.

  // TODO: Investigate looping rather than always extracting/replacing specific
  // lanes to fill gaps.

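  // For example (a sketch), a v4i32 build_vector with lanes {1, 2, 3, $x}
  // is lowered to a single v128.const covering the three constant lanes
  // (with a zero placeholder in the last lane) followed by one
  // i32x4.replace_lane to insert $x.
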
  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Returns the source vector and index vector pair if they exist. Checks for:
  // (extract_vector_elt
  //   $src,
  //   (sign_extend_inreg (extract_vector_elt $indices, $i))
  // )
  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
    auto Bail = std::make_pair(SDValue(), SDValue());
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleSrc = Lane->getOperand(0);
    const SDValue &IndexExt = Lane->getOperand(1);
    if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
      return Bail;
    const SDValue &Index = IndexExt->getOperand(0);
    if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleIndices = Index->getOperand(0);
    if (SwizzleSrc.getValueType() != MVT::v16i8 ||
        SwizzleIndices.getValueType() != MVT::v16i8 ||
        Index->getOperand(1)->getOpcode() != ISD::Constant ||
        Index->getConstantOperandVal(1) != I)
      return Bail;
    return std::make_pair(SwizzleSrc, SwizzleIndices);
  };

  // If the lane is extracted from another vector at a constant index, return
  // that vector. The source vector must not have more lanes than the dest
  // because the shufflevector indices are in terms of the destination lanes and
  // would not be able to address the smaller individual source lanes.
  auto GetShuffleSrc = [&](const SDValue &Lane) {
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
      return SDValue();
    if (Lane->getOperand(0).getValueType().getVectorNumElements() >
        VecT.getVectorNumElements())
      return SDValue();
    return Lane->getOperand(0);
  };

  using ValueEntry = std::pair<SDValue, size_t>;
  SmallVector<ValueEntry, 16> SplatValueCounts;

  using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
  SmallVector<SwizzleEntry, 16> SwizzleCounts;

  using ShuffleEntry = std::pair<SDValue, size_t>;
  SmallVector<ShuffleEntry, 16> ShuffleCounts;

  auto AddCount = [](auto &Counts, const auto &Val) {
    auto CountIt =
        llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
    if (CountIt == Counts.end()) {
      Counts.emplace_back(Val, 1);
    } else {
      CountIt->second++;
    }
  };

  auto GetMostCommon = [](auto &Counts) {
    auto CommonIt =
        std::max_element(Counts.begin(), Counts.end(),
                         [](auto A, auto B) { return A.second < B.second; });
    assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
    return *CommonIt;
  };

  size_t NumConstantLanes = 0;

  // Count eligible lanes for each type of vector creation op
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane.isUndef())
      continue;

    AddCount(SplatValueCounts, Lane);

    if (IsConstant(Lane))
      NumConstantLanes++;
    if (auto ShuffleSrc = GetShuffleSrc(Lane))
      AddCount(ShuffleCounts, ShuffleSrc);
    if (CanSwizzle) {
      auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
      if (SwizzleSrcs.first)
        AddCount(SwizzleCounts, SwizzleSrcs);
    }
  }

  SDValue SplatValue;
  size_t NumSplatLanes;
  std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);

  SDValue SwizzleSrc;
  SDValue SwizzleIndices;
  size_t NumSwizzleLanes = 0;
  if (SwizzleCounts.size())
    std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
                          NumSwizzleLanes) = GetMostCommon(SwizzleCounts);

  // Shuffles can draw from up to two vectors, so find the two most common
  // sources.
  SDValue ShuffleSrc1, ShuffleSrc2;
  size_t NumShuffleLanes = 0;
  if (ShuffleCounts.size()) {
    std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
    ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
                                       ShuffleCounts.end(),
                                       [&](const auto &Pair) {
                                         return Pair.first == ShuffleSrc1;
                                       }),
                        ShuffleCounts.end());
  }
  if (ShuffleCounts.size()) {
    size_t AdditionalShuffleLanes;
    std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
        GetMostCommon(ShuffleCounts);
    NumShuffleLanes += AdditionalShuffleLanes;
  }

  // Predicate returning true if the lane is properly initialized by the
  // original instruction
  std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
  SDValue Result;
  // Prefer swizzles over shuffles over vector consts over splats
  if (NumSwizzleLanes >= NumShuffleLanes &&
      NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
    Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
                         SwizzleIndices);
    auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
    IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
      return Swizzled == GetSwizzleSrcs(I, Lane);
    };
  } else if (NumShuffleLanes >= NumConstantLanes &&
             NumShuffleLanes >= NumSplatLanes) {
    size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
    size_t DestLaneCount = VecT.getVectorNumElements();
    size_t Scale1 = 1;
    size_t Scale2 = 1;
    SDValue Src1 = ShuffleSrc1;
    SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
    if (Src1.getValueType() != VecT) {
      size_t LaneSize =
          Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale1 = LaneSize / DestLaneSize;
      Src1 = DAG.getBitcast(VecT, Src1);
    }
    if (Src2.getValueType() != VecT) {
      size_t LaneSize =
          Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale2 = LaneSize / DestLaneSize;
      Src2 = DAG.getBitcast(VecT, Src2);
    }

    int Mask[16];
    assert(DestLaneCount <= 16);
    for (size_t I = 0; I < DestLaneCount; ++I) {
      const SDValue &Lane = Op->getOperand(I);
      SDValue Src = GetShuffleSrc(Lane);
      if (Src == ShuffleSrc1) {
        Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
      } else if (Src && Src == ShuffleSrc2) {
        Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
      } else {
        Mask[I] = -1;
      }
    }
    ArrayRef<int> MaskRef(Mask, DestLaneCount);
    Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
    IsLaneConstructed = [&](size_t, const SDValue &Lane) {
      auto Src = GetShuffleSrc(Lane);
      return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
    };
  } else if (NumConstantLanes >= NumSplatLanes) {
    SmallVector<SDValue, 16> ConstLanes;
    for (const SDValue &Lane : Op->op_values()) {
      if (IsConstant(Lane)) {
        ConstLanes.push_back(Lane);
      } else if (LaneT.isFloatingPoint()) {
        ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
      } else {
        ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
      }
    }
    Result = DAG.getBuildVector(VecT, DL, ConstLanes);
    IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
      return IsConstant(Lane);
    };
  } else {
    // Use a splat, but possibly a load_splat
    LoadSDNode *SplattedLoad;
    if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
        SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
      Result = DAG.getMemIntrinsicNode(
          WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
          {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
           SplattedLoad->getOffset()},
          SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
    } else {
      Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
    }
    IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
      return Lane == SplatValue;
    };
  }

  assert(Result);
  assert(IsLaneConstructed);

  // Add replace_lane instructions for any unhandled values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }

  return Result;
}

SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

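  // For example (a sketch), a v4i32 shuffle with mask <0, 4, -1, 5> expands
  // to the byte indices
  //
  //   0,1,2,3, 16,17,18,19, 0,0,0,0, 20,21,22,23
  //
  // where index 16 is the first byte of the second input vector and the
  // undef lane contributes zeros.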
  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero
      uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // The legalizer does not know how to expand the unsupported comparison modes
  // of i64x2 vectors, so we manually unroll them here.
  assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
  SmallVector<SDValue, 2> LHS, RHS;
  DAG.ExtractVectorElements(Op->getOperand(0), LHS);
  DAG.ExtractVectorElements(Op->getOperand(1), RHS);
  const SDValue &CC = Op->getOperand(2);
  auto MakeLane = [&](unsigned I) {
    return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
                       DAG.getConstant(uint64_t(-1), DL, MVT::i64),
                       DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
  };
  return DAG.getBuildVector(Op->getValueType(0), DL,
                            {MakeLane(0), MakeLane(1)});
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  // Perform default expansion
  return SDValue();
}

static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
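  // (e.g. for i8 lanes the shift amount is masked to [0, 7], matching wasm's
  // modulo-lane-width shift semantics; an unmasked 32-bit shift by 8 or more
  // would not be equivalent to an 8-bit shift).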
  SDLoc DL(Op);
  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
  SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
  unsigned ShiftOpcode = Op.getOpcode();
  SmallVector<SDValue, 16> ShiftedElements;
  DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> ShiftElements;
  DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> UnrolledOps;
  for (size_t i = 0; i < NumLanes; ++i) {
    SDValue MaskedShiftValue =
        DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
    SDValue ShiftedValue = ShiftedElements[i];
    if (ShiftOpcode == ISD::SRA)
      ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
                                 ShiftedValue, DAG.getValueType(LaneT));
    UnrolledOps.push_back(
        DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
  }
  return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
  if (!ShiftVal)
    return unrollVectorShift(Op, DAG);

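  // When the shift amount is a splat, it maps directly onto wasm's scalar
  // shift operand; e.g. (a sketch), (shl (v4i32 $v), (splat $x)) becomes
  // (VEC_SHL $v, $x), which selects to i32x4.shl.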
  // Use anyext because none of the high bits can affect the shift
  ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);

  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
}

SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT ResT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

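  // Only the saturating conversions with direct wasm equivalents are kept:
  // the scalar forms map onto i{32,64}.trunc_sat_f{32,64}_{s,u} and the
  // v4i32 form onto i32x4.trunc_sat_f32x4_{s,u}; everything else falls back
  // to the default expansion.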
  if ((ResT == MVT::i32 || ResT == MVT::i64) &&
      (SatVT == MVT::i32 || SatVT == MVT::i64))
    return Op;

  if (ResT == MVT::v4i32 && SatVT == MVT::i32)
    return Op;

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Custom DAG combine hooks
//===----------------------------------------------------------------------===//
static SDValue
performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  auto Shuffle = cast<ShuffleVectorSDNode>(N);

  // Hoist vector bitcasts that don't change the number of lanes out of unary
  // shuffles, where they are less likely to get in the way of other combines.
  // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
  //   (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
  SDValue Bitcast = N->getOperand(0);
  if (Bitcast.getOpcode() != ISD::BITCAST)
    return SDValue();
  if (!N->getOperand(1).isUndef())
    return SDValue();
  SDValue CastOp = Bitcast.getOperand(0);
  MVT SrcType = CastOp.getSimpleValueType();
  MVT DstType = Bitcast.getSimpleValueType();
  if (!SrcType.is128BitVector() ||
      SrcType.getVectorNumElements() != DstType.getVectorNumElements())
    return SDValue();
  SDValue NewShuffle = DAG.getVectorShuffle(
      SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
  return DAG.getBitcast(DstType, NewShuffle);
}

static SDValue
performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ZERO_EXTEND);

  // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
  // possible before the extract_subvector can be expanded.
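  // For example (a sketch):
  //
  //   (v8i16 (sign_extend (v8i8 (extract_subvector (v16i8 $x), 8))))
  //
  // becomes (EXTEND_HIGH_S $x), i.e. i16x8.extend_high_i8x16_s.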
  auto Extract = N->getOperand(0);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return SDValue();
  auto Source = Extract.getOperand(0);
  auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
  if (IndexNode == nullptr)
    return SDValue();
  auto Index = IndexNode->getZExtValue();

  // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
  // extracted subvector is the low or high half of its source.
  EVT ResVT = N->getValueType(0);
  if (ResVT == MVT::v8i16) {
    if (Extract.getValueType() != MVT::v8i8 ||
        Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
      return SDValue();
  } else if (ResVT == MVT::v4i32) {
    if (Extract.getValueType() != MVT::v4i16 ||
        Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
      return SDValue();
  } else if (ResVT == MVT::v2i64) {
    if (Extract.getValueType() != MVT::v2i32 ||
        Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
      return SDValue();
  } else {
    return SDValue();
  }

  bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
  bool IsLow = Index == 0;

  unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
                                : WebAssemblyISD::EXTEND_HIGH_S)
                       : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
                                : WebAssemblyISD::EXTEND_HIGH_U);

  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

static SDValue
performVectorConvertLowCombine(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  EVT ResVT = N->getValueType(0);
  if (ResVT != MVT::v2f64)
    return SDValue();

  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
    case ISD::SINT_TO_FP:
      return WebAssemblyISD::CONVERT_LOW_S;
    case ISD::UINT_TO_FP:
      return WebAssemblyISD::CONVERT_LOW_U;
    case ISD::FP_EXTEND:
      return WebAssemblyISD::PROMOTE_LOW;
    }
    llvm_unreachable("unexpected op");
  };

  if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
    // Combine this:
    //
    //   (v2f64 (extract_subvector
    //     (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
    //
    // into (f64x2.convert_low_i32x4_{s,u} $x).
    //
    // Or this:
    //
    //   (v2f64 (extract_subvector
    //     (v4f64 (fp_extend (v4f32 $x))), 0))
    //
    // into (f64x2.promote_low_f32x4 $x).
    auto Conversion = N->getOperand(0);
    auto ConversionOp = Conversion.getOpcode();
    MVT ExpectedSourceType;
    switch (ConversionOp) {
    case ISD::SINT_TO_FP:
    case ISD::UINT_TO_FP:
      ExpectedSourceType = MVT::v4i32;
      break;
    case ISD::FP_EXTEND:
      ExpectedSourceType = MVT::v4f32;
      break;
    default:
      return SDValue();
    }

    if (Conversion.getValueType() != MVT::v4f64)
      return SDValue();

    auto Source = Conversion.getOperand(0);
    if (Source.getValueType() != ExpectedSourceType)
      return SDValue();

    auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
      return SDValue();

    auto Op = GetWasmConversionOp(ConversionOp);
    return DAG.getNode(Op, SDLoc(N), ResVT, Source);
  }

  // Combine this:
  //
  //   (v2f64 ({s,u}int_to_fp
  //     (v2i32 (extract_subvector (v4i32 $x), 0))))
  //
  // into (f64x2.convert_low_i32x4_{s,u} $x).
  //
  // Or this:
  //
  //   (v2f64 (fp_extend
  //     (v2f32 (extract_subvector (v4f32 $x), 0))))
  //
  // into (f64x2.promote_low_f32x4 $x).
  auto ConversionOp = N->getOpcode();
  MVT ExpectedExtractType;
  MVT ExpectedSourceType;
  switch (ConversionOp) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    ExpectedExtractType = MVT::v2i32;
    ExpectedSourceType = MVT::v4i32;
    break;
  case ISD::FP_EXTEND:
    ExpectedExtractType = MVT::v2f32;
    ExpectedSourceType = MVT::v4f32;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  auto Extract = N->getOperand(0);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return SDValue();

  if (Extract.getValueType() != ExpectedExtractType)
    return SDValue();

  auto Source = Extract.getOperand(0);
  if (Source.getValueType() != ExpectedSourceType)
    return SDValue();

  auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
  if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
    return SDValue();

  unsigned Op = GetWasmConversionOp(ConversionOp);
  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

static SDValue
performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
    case ISD::FP_TO_SINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_S;
    case ISD::FP_TO_UINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_U;
    case ISD::FP_ROUND:
      return WebAssemblyISD::DEMOTE_ZERO;
    }
    llvm_unreachable("unexpected op");
  };

  auto IsZeroSplat = [](SDValue SplatVal) {
    auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    return Splat &&
           Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                                  HasAnyUndefs) &&
           SplatValue == 0;
  };

  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine this:
    //
    //   (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
    //
    // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
    //
    // Or this:
    //
    //   (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
    //
    // into (f32x4.demote_zero_f64x2 $x).
    EVT ResVT;
    EVT ExpectedConversionType;
    auto Conversion = N->getOperand(0);
    auto ConversionOp = Conversion.getOpcode();
    switch (ConversionOp) {
    case ISD::FP_TO_SINT_SAT:
    case ISD::FP_TO_UINT_SAT:
      ResVT = MVT::v4i32;
      ExpectedConversionType = MVT::v2i32;
      break;
    case ISD::FP_ROUND:
      ResVT = MVT::v4f32;
      ExpectedConversionType = MVT::v2f32;
      break;
    default:
      return SDValue();
    }

    if (N->getValueType(0) != ResVT)
      return SDValue();

    if (Conversion.getValueType() != ExpectedConversionType)
      return SDValue();

    auto Source = Conversion.getOperand(0);
    if (Source.getValueType() != MVT::v2f64)
      return SDValue();

    if (!IsZeroSplat(N->getOperand(1)) ||
        N->getOperand(1).getValueType() != ExpectedConversionType)
      return SDValue();

    unsigned Op = GetWasmConversionOp(ConversionOp);
    return DAG.getNode(Op, SDLoc(N), ResVT, Source);
  }

  // Combine this:
  //
  //   (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
  //
  // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
  //
  // Or this:
  //
  //   (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
  //
  // into (f32x4.demote_zero_f64x2 $x).
  EVT ResVT;
  auto ConversionOp = N->getOpcode();
  switch (ConversionOp) {
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    ResVT = MVT::v4i32;
    break;
  case ISD::FP_ROUND:
    ResVT = MVT::v4f32;
    break;
  default:
    llvm_unreachable("unexpected op");
  }

  if (N->getValueType(0) != ResVT)
    return SDValue();

  auto Concat = N->getOperand(0);
  if (Concat.getValueType() != MVT::v4f64)
    return SDValue();

  auto Source = Concat.getOperand(0);
  if (Source.getValueType() != MVT::v2f64)
    return SDValue();

  if (!IsZeroSplat(Concat.getOperand(1)) ||
      Concat.getOperand(1).getValueType() != MVT::v2f64)
    return SDValue();

  unsigned Op = GetWasmConversionOp(ConversionOp);
  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}

SDValue
WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return SDValue();
  case ISD::VECTOR_SHUFFLE:
    return performVECTOR_SHUFFLECombine(N, DCI);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return performVectorExtendCombine(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::FP_EXTEND:
  case ISD::EXTRACT_SUBVECTOR:
    return performVectorConvertLowCombine(N, DCI);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_ROUND:
  case ISD::CONCAT_VECTORS:
    return performVectorTruncZeroCombine(N, DCI);
  }
}
