//===-- IR/VPIntrinsics.def - Describes llvm.vp.* Intrinsics -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various Vector Predication intrinsics.
// This is used as a central place for enumerating the different instructions
// and should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
// Register a VP intrinsic and begin its property scope.
// All VP intrinsic scopes are top level, ie it is illegal to place a
// BEGIN_REGISTER_VP_INTRINSIC within a VP intrinsic scope.
// \p VPID     The VP intrinsic id.
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_INTRINSIC
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)
#endif
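
// A consumer of this file defines only the macros it needs before including
// it; everything else defaults to a no-op, and the file #undefs all macros
// again at the end. As a minimal sketch (illustrative only; the helper name
// below is made up), a predicate over all VP intrinsics can be generated like
// this:
//
//   static bool isVPIntrinsicID(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       return false;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)                \
//     case Intrinsic::VPID:
//   #include "llvm/IR/VPIntrinsics.def"
//       return true;
//     }
//   }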

// End the property scope of a VP intrinsic.
#ifndef END_REGISTER_VP_INTRINSIC
#define END_REGISTER_VP_INTRINSIC(VPID)
#endif

// Register a new VP SDNode and begin its property scope.
// When the SDNode scope is nested within a VP intrinsic scope, it is
// implicitly registered as the canonical SDNode for this VP intrinsic. There
// is one VP intrinsic that maps directly to one SDNode that goes by the
// same name.  Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
// \p VPSD     The SelectionDAG Node id (eg VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing.
//             If LEGALPOS < 0, then the return type given by
//             TheNode->getValueType(-1-LEGALPOS) is used.
// \p TDNAME   The name of the TableGen definition of this SDNode.
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
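// For example, a LEGALPOS of -1 selects the first result type,
// TheNode->getValueType(0), and a LEGALPOS of -2 would select getValueType(1).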
#ifndef BEGIN_REGISTER_VP_SDNODE
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)
#endif

// End the property scope of a new VP SDNode.
#ifndef END_REGISTER_VP_SDNODE
#define END_REGISTER_VP_SDNODE(VPSD)
#endif

// Helper macro to set up the mapping from VP intrinsic to ISD opcode.
// Note: More than one VP intrinsic may map to one ISD opcode.
#ifndef HELPER_MAP_VPID_TO_VPSD
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)
#endif

// Helper macros for the common "1:1 - Intrinsic : SDNode" case.
//
// There is one VP intrinsic that maps directly to one SDNode that goes by the
// same name.  Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
//
// \p VPID     The canonical name (eg `vp_add`, which at the same time is the
//             name of the intrinsic and the TableGen def of the SDNode).
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
// \p VPSD     The SelectionDAG Node id (eg VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing
//             this SDNode. This can be `-1`, in which case the return type of
//             the SDNode is used.
#define BEGIN_REGISTER_VP(VPID, MASKPOS, EVLPOS, VPSD, LEGALPOS)               \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)                           \
  BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, VPID, MASKPOS, EVLPOS)              \
  HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)

#define END_REGISTER_VP(VPID, VPSD)                                            \
  END_REGISTER_VP_INTRINSIC(VPID)                                              \
  END_REGISTER_VP_SDNODE(VPSD)
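
// For illustration, a registration such as
//   BEGIN_REGISTER_VP(vp_add, 2, 3, VP_ADD, -1)
// is shorthand for
//   BEGIN_REGISTER_VP_INTRINSIC(vp_add, 2, 3)
//   BEGIN_REGISTER_VP_SDNODE(VP_ADD, -1, vp_add, 2, 3)
//   HELPER_MAP_VPID_TO_VPSD(vp_add, VP_ADD)
// and END_REGISTER_VP(vp_add, VP_ADD) closes both property scopes again.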

// The following macros attach properties to the scope they are placed in. This
// assigns the property to the VP Intrinsic and/or SDNode that belongs to the
// scope.
//
// Property Macros {

// The intrinsic and/or SDNode has the same function as this LLVM IR Opcode.
// \p OPC      The opcode of the instruction with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_OPC
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC)
#endif

// Whether the intrinsic may have a rounding mode or exception behavior operand
// bundle.
// \p HASROUND   '1' if the intrinsic can have a rounding mode operand bundle,
//               '0' otherwise.
// \p HASEXCEPT  '1' if the intrinsic can have an exception behavior operand
//               bundle, '0' otherwise.
// \p INTRINID  The constrained fp intrinsic this VP intrinsic corresponds to.
#ifndef VP_PROPERTY_CONSTRAINEDFP
#define VP_PROPERTY_CONSTRAINEDFP(HASROUND, HASEXCEPT, INTRINID)
#endif

// Map this VP intrinsic to its canonical functional intrinsic.
// \p INTRIN     The non-VP intrinsic with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#define VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)
#endif

// This VP Intrinsic is a memory operation.
// The pointer arg is at POINTERPOS and the data arg is at DATAPOS.
#ifndef VP_PROPERTY_MEMOP
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS)
#endif

// Map this VP reduction intrinsic to its reduction operand positions.
#ifndef VP_PROPERTY_REDUCTION
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS)
#endif

// A property to infer VP binary-op SDNode opcodes automatically.
#ifndef VP_PROPERTY_BINARYOP
#define VP_PROPERTY_BINARYOP
#endif

// A property to infer VP type casts automatically.
#ifndef VP_PROPERTY_CASTOP
#define VP_PROPERTY_CASTOP
#endif

// This VP Intrinsic is a comparison operation.
// The condition code arg is at CCPOS and accepts floating-point condition
// codes if ISFP is set, else it accepts integer condition codes.
#ifndef VP_PROPERTY_CMP
#define VP_PROPERTY_CMP(CCPOS, ISFP)
#endif

/// } Property Macros
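
// As with the registration macros, a consumer overrides only the property
// macros it is interested in. A rough sketch (illustrative, not a definitive
// API) of how the functional-opcode property can be consumed to recover the
// unpredicated IR opcode of a VP intrinsic:
//
//   static std::optional<unsigned> getFunctionalOpcode(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       break;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)                \
//     case Intrinsic::VPID:
//   #define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
//   #define END_REGISTER_VP_INTRINSIC(VPID) break;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//     return std::nullopt;
//   }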

///// Integer Arithmetic {

// Specialized helper macro for integer binary operators (%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_INT_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_BINARY_INT_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_INT_VP(VPID, VPSD, IROPC)                       \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, -1)                                      \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_BINARYOP                                                         \
  END_REGISTER_VP(VPID, VPSD)
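
// For example, HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add) below
// registers llvm.vp.add(x, y, mask, evl) with the mask at operand 2 and the
// explicit vector length at operand 3, maps it to the VP_ADD SDNode, and
// records the Add IR opcode as its unpredicated equivalent.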

// llvm.vp.add(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add)

// llvm.vp.and(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_and, VP_AND, And)

// llvm.vp.ashr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_ASHR, AShr)

// llvm.vp.lshr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_LSHR, LShr)

// llvm.vp.mul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_mul, VP_MUL, Mul)

// llvm.vp.or(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_or, VP_OR, Or)

// llvm.vp.sdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sdiv, VP_SDIV, SDiv)

// llvm.vp.shl(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_shl, VP_SHL, Shl)

// llvm.vp.srem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_srem, VP_SREM, SRem)

// llvm.vp.sub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sub, VP_SUB, Sub)

// llvm.vp.udiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_udiv, VP_UDIV, UDiv)

// llvm.vp.urem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_urem, VP_UREM, URem)

// llvm.vp.xor(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_xor, VP_XOR, Xor)

#undef HELPER_REGISTER_BINARY_INT_VP

///// } Integer Arithmetic

///// Floating-Point Arithmetic {

// Specialized helper macro for floating-point binary operators
// <operation>(%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_FP_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_BINARY_FP_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_FP_VP(OPSUFFIX, VPSD, IROPC)                    \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 2, 3, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_##OPSUFFIX)         \
  VP_PROPERTY_BINARYOP                                                         \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)
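
// For example, HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd) below
// registers llvm.vp.fadd(x, y, mask, evl), maps it to the VP_FADD SDNode and
// the FAdd IR opcode, ties it to llvm.experimental.constrained.fadd, and
// marks it as accepting both rounding-mode and exception-behavior bundles.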

// llvm.vp.fadd(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd)

// llvm.vp.fsub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fsub, VP_FSUB, FSub)

// llvm.vp.fmul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fmul, VP_FMUL, FMul)

// llvm.vp.fdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fdiv, VP_FDIV, FDiv)

// llvm.vp.frem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(frem, VP_FREM, FRem)

#undef HELPER_REGISTER_BINARY_FP_VP

// llvm.vp.fneg(x,mask,vlen)
BEGIN_REGISTER_VP(vp_fneg, 1, 2, VP_FNEG, -1)
VP_PROPERTY_FUNCTIONAL_OPC(FNeg)
END_REGISTER_VP(vp_fneg, VP_FNEG)

// llvm.vp.fma(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fma, 3, 4, VP_FMA, -1)
VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fma)
END_REGISTER_VP(vp_fma, VP_FMA)

///// } Floating-Point Arithmetic

///// Type Casts {
// Specialized helper macro for floating-point type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_FP_CAST_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_FP_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_FP_CAST_VP(OPSUFFIX, VPSD, IROPC, HASROUND)            \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_CONSTRAINEDFP(HASROUND, 1, experimental_constrained_##OPSUFFIX)  \
  VP_PROPERTY_CASTOP                                                           \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)

// llvm.vp.fptoui(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptoui, VP_FPTOUI, FPToUI, 0)

// llvm.vp.fptosi(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptosi, VP_FPTOSI, FPToSI, 0)

// llvm.vp.uitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(uitofp, VP_UITOFP, UIToFP, 1)

// llvm.vp.sitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(sitofp, VP_SITOFP, SIToFP, 1)

// llvm.vp.fptrunc(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptrunc, VP_FP_ROUND, FPTrunc, 1)

// llvm.vp.fpext(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fpext, VP_FP_EXTEND, FPExt, 0)

#undef HELPER_REGISTER_FP_CAST_VP

// Specialized helper macro for integer type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_INT_CAST_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_INT_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_INT_CAST_VP(OPSUFFIX, VPSD, IROPC)                     \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_CASTOP                                                           \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)

// llvm.vp.trunc(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(trunc, VP_TRUNCATE, Trunc)

// llvm.vp.zext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(zext, VP_ZERO_EXTEND, ZExt)

// llvm.vp.sext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(sext, VP_SIGN_EXTEND, SExt)

// llvm.vp.ptrtoint(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(ptrtoint, VP_PTRTOINT, PtrToInt)

// llvm.vp.inttoptr(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(inttoptr, VP_INTTOPTR, IntToPtr)

#undef HELPER_REGISTER_INT_CAST_VP

///// } Type Casts

///// Comparisons {

// VP_SETCC (ISel only)
BEGIN_REGISTER_VP_SDNODE(VP_SETCC, 0, vp_setcc, 3, 4)
END_REGISTER_VP_SDNODE(VP_SETCC)

// llvm.vp.fcmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_fcmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_fcmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(FCmp)
VP_PROPERTY_CMP(2, true)
VP_PROPERTY_CONSTRAINEDFP(0, 1, experimental_constrained_fcmp)
END_REGISTER_VP_INTRINSIC(vp_fcmp)

// llvm.vp.icmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_icmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_icmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(ICmp)
VP_PROPERTY_CMP(2, false)
END_REGISTER_VP_INTRINSIC(vp_icmp)

///// } Comparisons

///// Memory Operations {
// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
// chain = VP_STORE chain,val,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_STORE, 0, vp_store, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_store, VP_STORE)
VP_PROPERTY_FUNCTIONAL_OPC(Store)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_store)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_store, VP_STORE)

// llvm.experimental.vp.strided.store(val,ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_store, 3, 4)
// chain = EXPERIMENTAL_VP_STRIDED_STORE chain,val,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_STORE, 0, experimental_vp_strided_store, 5, 6)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)

// llvm.vp.scatter(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_scatter, 2, 3)
// chain = VP_SCATTER chain,val,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_SCATTER, -1, vp_scatter, 5, 6)
HELPER_MAP_VPID_TO_VPSD(vp_scatter, VP_SCATTER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_scatter)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_scatter, VP_SCATTER)

// llvm.vp.load(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_load, 1, 2)
// val,chain = VP_LOAD chain,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_LOAD, -1, vp_load, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_load, VP_LOAD)
VP_PROPERTY_FUNCTIONAL_OPC(Load)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_load)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(vp_load, VP_LOAD)

// llvm.experimental.vp.strided.load(ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_load, 2, 3)
// val,chain = EXPERIMENTAL_VP_STRIDED_LOAD chain,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_LOAD, -1, experimental_vp_strided_load, 4, 5)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)

// llvm.vp.gather(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_gather, 1, 2)
// val,chain = VP_GATHER chain,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_GATHER, -1, vp_gather, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_gather, VP_GATHER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_gather)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(vp_gather, VP_GATHER)

///// } Memory Operations

///// Reductions {

// Specialized helper macro for VP reductions (%start, %x, %mask, %evl).
#ifdef HELPER_REGISTER_REDUCTION_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_REDUCTION_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_VP(VPID, VPSD, INTRIN)                       \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, -1)                                      \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)                                     \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP(VPID, VPSD)
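
// For example, the vp_reduce_add registration below describes
// llvm.vp.reduce.add(start, x, mask, evl) with the start value at operand 0,
// the vector operand at 1, the mask at 2, and the explicit vector length at 3.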

// llvm.vp.reduce.add(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_add, VP_REDUCE_ADD,
                             vector_reduce_add)

// llvm.vp.reduce.mul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_mul, VP_REDUCE_MUL,
                             vector_reduce_mul)

// llvm.vp.reduce.and(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_and, VP_REDUCE_AND,
                             vector_reduce_and)

// llvm.vp.reduce.or(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_or, VP_REDUCE_OR,
                             vector_reduce_or)

// llvm.vp.reduce.xor(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_xor, VP_REDUCE_XOR,
                             vector_reduce_xor)

// llvm.vp.reduce.smax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smax, VP_REDUCE_SMAX,
                             vector_reduce_smax)

// llvm.vp.reduce.smin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smin, VP_REDUCE_SMIN,
                             vector_reduce_smin)

// llvm.vp.reduce.umax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umax, VP_REDUCE_UMAX,
                             vector_reduce_umax)

// llvm.vp.reduce.umin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umin, VP_REDUCE_UMIN,
                             vector_reduce_umin)

// llvm.vp.reduce.fmax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmax, VP_REDUCE_FMAX,
                             vector_reduce_fmax)

// llvm.vp.reduce.fmin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmin, VP_REDUCE_FMIN,
                             vector_reduce_fmin)

#undef HELPER_REGISTER_REDUCTION_VP

// Specialized helper macro for VP reductions as above, but with two forms:
// sequential and reassociative. The two forms are distinguished by the
// presence of the 'reassoc' fast-math flag in the IR and by two distinct ISD
// opcodes in the SelectionDAG.
// Note that by default we map the VP intrinsic to the SEQ ISD opcode, which
// can then be relaxed to the non-SEQ ISD opcode if the 'reassoc' flag is set.
#ifdef HELPER_REGISTER_REDUCTION_SEQ_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_REDUCTION_SEQ_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_SEQ_VP(VPID, VPSD, SEQ_VPSD, INTRIN)         \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, 2, 3)                                      \
  BEGIN_REGISTER_VP_SDNODE(VPSD, -1, VPID, 2, 3)                               \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP_SDNODE(VPSD)                                                 \
  BEGIN_REGISTER_VP_SDNODE(SEQ_VPSD, -1, VPID, 2, 3)                           \
  HELPER_MAP_VPID_TO_VPSD(VPID, SEQ_VPSD)                                      \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP_SDNODE(SEQ_VPSD)                                             \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)                                     \
  END_REGISTER_VP_INTRINSIC(VPID)

// llvm.vp.reduce.fadd(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fadd, VP_REDUCE_FADD,
                                 VP_REDUCE_SEQ_FADD,
                                 vector_reduce_fadd)

// llvm.vp.reduce.fmul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fmul, VP_REDUCE_FMUL,
                                 VP_REDUCE_SEQ_FMUL,
                                 vector_reduce_fmul)

#undef HELPER_REGISTER_REDUCTION_SEQ_VP

///// } Reductions

///// Shuffles {

// The mask 'cond' operands of llvm.vp.select and llvm.vp.merge are not
// reported as masks with the BEGIN_REGISTER_VP_* macros.  This is because,
// unlike other VP intrinsics, these two have a defined result on lanes where
// the mask is false.
//
// llvm.vp.select(cond,on_true,on_false,vlen)
BEGIN_REGISTER_VP(vp_select, None, 3, VP_SELECT, -1)
VP_PROPERTY_FUNCTIONAL_OPC(Select)
END_REGISTER_VP(vp_select, VP_SELECT)

// llvm.vp.merge(cond,on_true,on_false,pivot)
BEGIN_REGISTER_VP(vp_merge, None, 3, VP_MERGE, -1)
END_REGISTER_VP(vp_merge, VP_MERGE)

BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)

///// } Shuffles

#undef BEGIN_REGISTER_VP
#undef BEGIN_REGISTER_VP_INTRINSIC
#undef BEGIN_REGISTER_VP_SDNODE
#undef END_REGISTER_VP
#undef END_REGISTER_VP_INTRINSIC
#undef END_REGISTER_VP_SDNODE
#undef HELPER_MAP_VPID_TO_VPSD
#undef VP_PROPERTY_BINARYOP
#undef VP_PROPERTY_CASTOP
#undef VP_PROPERTY_CMP
#undef VP_PROPERTY_CONSTRAINEDFP
#undef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#undef VP_PROPERTY_FUNCTIONAL_OPC
#undef VP_PROPERTY_MEMOP
#undef VP_PROPERTY_REDUCTION