1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares codegen opcodes and related utilities.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #ifndef LLVM_CODEGEN_ISDOPCODES_H
14 #define LLVM_CODEGEN_ISDOPCODES_H
15
16 #include "llvm/CodeGen/ValueTypes.h"
17
18 namespace llvm {
19
20 /// ISD namespace - This namespace contains an enum which represents all of the
21 /// SelectionDAG node types and value types.
22 ///
23 namespace ISD {
24
25 //===--------------------------------------------------------------------===//
26 /// ISD::NodeType enum - This enum defines the target-independent operators
27 /// for a SelectionDAG.
28 ///
29 /// Targets may also define target-dependent operator codes for SDNodes. For
30 /// example, on x86, these are the enum values in the X86ISD namespace.
31 /// Targets should aim to use target-independent operators to model their
32 /// instruction sets as much as possible, and only use target-dependent
33 /// operators when they have special requirements.
34 ///
/// Finally, during and after selection proper, SDNodes may use special
36 /// operator codes that correspond directly with MachineInstr opcodes. These
37 /// are used to represent selected instructions. See the isMachineOpcode()
38 /// and getMachineOpcode() member functions of SDNode.
39 ///
40 enum NodeType {
41
42 /// DELETED_NODE - This is an illegal value that is used to catch
43 /// errors. This opcode is not a legal opcode for any node.
44 DELETED_NODE,
45
46 /// EntryToken - This is the marker used to indicate the start of a region.
47 EntryToken,
48
49 /// TokenFactor - This node takes multiple tokens as input and produces a
50 /// single token result. This is used to represent the fact that the operand
51 /// operators are independent of each other.
52 TokenFactor,
53
/// AssertSext, AssertZext - These nodes record that a register contains a
/// value that has already been zero or sign extended from a narrower type.
/// These nodes take two operands. The first is the node that has already
/// been extended, and the second is a value type node indicating the width
/// of the extension.
/// NOTE: If the source value (or any vector element value) is poison, the
/// assertion does not hold for that value.
61 AssertSext,
62 AssertZext,
63
/// AssertAlign - This node records that a register contains a value that
/// has a known alignment and the trailing bits are known to be zero.
/// NOTE: If the source value (or any vector element value) is poison, the
/// assertion does not hold for that value.
68 AssertAlign,
69
70 /// Various leaf nodes.
71 BasicBlock,
72 VALUETYPE,
73 CONDCODE,
74 Register,
75 RegisterMask,
76 Constant,
77 ConstantFP,
78 GlobalAddress,
79 GlobalTLSAddress,
80 FrameIndex,
81 JumpTable,
82 ConstantPool,
83 ExternalSymbol,
84 BlockAddress,
85
86 /// The address of the GOT
87 GLOBAL_OFFSET_TABLE,
88
89 /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
90 /// llvm.returnaddress on the DAG. These nodes take one operand, the index
91 /// of the frame or return address to return. An index of zero corresponds
92 /// to the current function's frame or return address, an index of one to
93 /// the parent's frame or return address, and so on.
94 FRAMEADDR,
95 RETURNADDR,
96
97 /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
/// This node takes no operands and returns a target-specific pointer to the
99 /// place in the stack frame where the return address of the current
100 /// function is stored.
101 ADDROFRETURNADDR,
102
103 /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
104 /// and returns the stack pointer value at the entry of the current
105 /// function calling this intrinsic.
106 SPONENTRY,
107
108 /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
109 /// Materializes the offset from the local object pointer of another
110 /// function to a particular local object passed to llvm.localescape. The
111 /// operand is the MCSymbol label used to represent this offset, since
112 /// typically the offset is not known until after code generation of the
113 /// parent.
114 LOCAL_RECOVER,
115
/// READ_REGISTER, WRITE_REGISTER - These nodes represent the
/// llvm.read_register and llvm.write_register intrinsics on the DAG, which
/// implement the named register global variables extension.
118 READ_REGISTER,
119 WRITE_REGISTER,
120
121 /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
122 /// first (possible) on-stack argument. This is needed for correct stack
123 /// adjustment during unwind.
124 FRAME_TO_ARGS_OFFSET,
125
126 /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
127 /// Frame Address (CFA), generally the value of the stack pointer at the
128 /// call site in the previous frame.
129 EH_DWARF_CFA,
130
/// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
/// the 'eh_return' gcc dwarf builtin, which is used to return from an
/// exception. The general meaning is: adjust the stack by OFFSET and pass
/// execution to HANDLER. Many platform-specific details are involved as well.
135 EH_RETURN,
136
137 /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
138 /// This corresponds to the eh.sjlj.setjmp intrinsic.
139 /// It takes an input chain and a pointer to the jump buffer as inputs
140 /// and returns an outchain.
141 EH_SJLJ_SETJMP,
142
143 /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
144 /// This corresponds to the eh.sjlj.longjmp intrinsic.
145 /// It takes an input chain and a pointer to the jump buffer as inputs
146 /// and returns an outchain.
147 EH_SJLJ_LONGJMP,
148
149 /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
150 /// The target initializes the dispatch table here.
151 EH_SJLJ_SETUP_DISPATCH,
152
153 /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
154 /// simplification, or lowering of the constant. They are used for constants
155 /// which are known to fit in the immediate fields of their users, or for
/// carrying magic numbers which are not values that need to be
/// materialized in registers.
158 TargetConstant,
159 TargetConstantFP,
160
161 /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
162 /// anything else with this node, and this is valid in the target-specific
163 /// dag, turning into a GlobalAddress operand.
164 TargetGlobalAddress,
165 TargetGlobalTLSAddress,
166 TargetFrameIndex,
167 TargetJumpTable,
168 TargetConstantPool,
169 TargetExternalSymbol,
170 TargetBlockAddress,
171
172 MCSymbol,
173
174 /// TargetIndex - Like a constant pool entry, but with completely
175 /// target-dependent semantics. Holds target flags, a 32-bit index, and a
176 /// 64-bit index. Targets can use this however they like.
177 TargetIndex,
178
179 /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
180 /// This node represents a target intrinsic function with no side effects.
181 /// The first operand is the ID number of the intrinsic from the
182 /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
183 /// node returns the result of the intrinsic.
184 INTRINSIC_WO_CHAIN,
185
186 /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
187 /// This node represents a target intrinsic function with side effects that
188 /// returns a result. The first operand is a chain pointer. The second is
189 /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
190 /// operands to the intrinsic follow. The node has two results, the result
191 /// of the intrinsic and an output chain.
192 INTRINSIC_W_CHAIN,
193
194 /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
195 /// This node represents a target intrinsic function with side effects that
196 /// does not return a result. The first operand is a chain pointer. The
197 /// second is the ID number of the intrinsic from the llvm::Intrinsic
198 /// namespace. The operands to the intrinsic follow.
199 INTRINSIC_VOID,
200
201 /// CopyToReg - This node has three operands: a chain, a register number to
202 /// set to this value, and a value.
203 CopyToReg,
204
205 /// CopyFromReg - This node indicates that the input value is a virtual or
206 /// physical register that is defined outside of the scope of this
207 /// SelectionDAG. The register is available from the RegisterSDNode object.
208 CopyFromReg,
209
210 /// UNDEF - An undefined node.
211 UNDEF,
212
// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
// is evaluated to UNDEF), or returns VAL otherwise. Note that each
// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
216 FREEZE,
217
218 /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
219 /// a Constant, which is required to be operand #1) half of the integer or
220 /// float value specified as operand #0. This is only for use before
221 /// legalization, for values that will be broken into multiple registers.
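/// For example (illustrative only): (i32 extract_element (i64 X), 1) yields
/// bits [63:32] of X, and (i32 extract_element (i64 X), 0) yields bits [31:0].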
222 EXTRACT_ELEMENT,
223
224 /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
225 /// Given two values of the same integer value type, this produces a value
226 /// twice as big. Like EXTRACT_ELEMENT, this can only be used before
227 /// legalization. The lower part of the composite value should be in
228 /// element 0 and the upper part should be in element 1.
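/// For example (illustrative only): (i64 build_pair (i32 LO), (i32 HI))
/// produces an i64 whose bits [31:0] are LO and whose bits [63:32] are HI.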
229 BUILD_PAIR,
230
231 /// MERGE_VALUES - This node takes multiple discrete operands and returns
/// them all as its individual results. This node has exactly the same
233 /// number of inputs and outputs. This node is useful for some pieces of the
234 /// code generator that want to think about a single node with multiple
235 /// results, not multiple nodes.
236 MERGE_VALUES,
237
238 /// Simple integer binary arithmetic operators.
239 ADD,
240 SUB,
241 MUL,
242 SDIV,
243 UDIV,
244 SREM,
245 UREM,
246
247 /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
248 /// a signed/unsigned value of type i[2*N], and return the full value as
249 /// two results, each of type iN.
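/// For example (illustrative only), for i32 operands A and B, UMUL_LOHI
/// computes the full product zext(A) * zext(B) and returns bits [31:0] as
/// the first result and bits [63:32] as the second result.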
250 SMUL_LOHI,
251 UMUL_LOHI,
252
253 /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
254 /// remainder result.
255 SDIVREM,
256 UDIVREM,
257
258 /// CARRY_FALSE - This node is used when folding other nodes,
259 /// like ADDC/SUBC, which indicate the carry result is always false.
260 CARRY_FALSE,
261
262 /// Carry-setting nodes for multiple precision addition and subtraction.
263 /// These nodes take two operands of the same value type, and produce two
264 /// results. The first result is the normal add or sub result, the second
265 /// result is the carry flag result.
266 /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
267 /// They are kept around for now to provide a smooth transition path
268 /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
269 ADDC,
270 SUBC,
271
272 /// Carry-using nodes for multiple precision addition and subtraction. These
273 /// nodes take three operands: The first two are the normal lhs and rhs to
274 /// the add or sub, and the third is the input carry flag. These nodes
275 /// produce two results; the normal result of the add or sub, and the output
/// carry flag. These nodes both read and write a carry flag to allow them
/// to be chained together for add and sub of arbitrarily large values.
279 ADDE,
280 SUBE,
281
282 /// Carry-using nodes for multiple precision addition and subtraction.
283 /// These nodes take three operands: The first two are the normal lhs and
284 /// rhs to the add or sub, and the third is a boolean value that is 1 if and
285 /// only if there is an incoming carry/borrow. These nodes produce two
286 /// results: the normal result of the add or sub, and a boolean value that is
287 /// 1 if and only if there is an outgoing carry/borrow.
288 ///
289 /// Care must be taken if these opcodes are lowered to hardware instructions
290 /// that use the inverse logic -- 0 if and only if there is an
291 /// incoming/outgoing carry/borrow. In such cases, you must preserve the
292 /// semantics of these opcodes by inverting the incoming carry/borrow, feeding
293 /// it to the add/sub hardware instruction, and then inverting the outgoing
294 /// carry/borrow.
295 ///
296 /// The use of these opcodes is preferable to adde/sube if the target supports
297 /// it, as the carry is a regular value rather than a glue, which allows
298 /// further optimisation.
299 ///
300 /// These opcodes are different from [US]{ADD,SUB}O in that ADDCARRY/SUBCARRY
301 /// consume and produce a carry/borrow, whereas [US]{ADD,SUB}O produce an
302 /// overflow.
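/// As an informal illustration (one possible expansion, not the only one),
/// a 2*N-bit addition can be chained as:
///   (LO, C)  = UADDO(LHS.lo, RHS.lo)
///   (HI, C2) = ADDCARRY(LHS.hi, RHS.hi, C)
/// where LHS.lo/LHS.hi denote the low/high N-bit halves (notation only).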
303 ADDCARRY,
304 SUBCARRY,
305
306 /// Carry-using overflow-aware nodes for multiple precision addition and
307 /// subtraction. These nodes take three operands: The first two are normal lhs
308 /// and rhs to the add or sub, and the third is a boolean indicating if there
309 /// is an incoming carry. They produce two results: the normal result of the
/// add or sub, and a boolean that indicates if an overflow occurred (*not* a
/// flag, because it may be stored to memory, etc.). If the type of the
312 /// boolean is not i1 then the high bits conform to getBooleanContents.
313 SADDO_CARRY,
314 SSUBO_CARRY,
315
316 /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
317 /// These nodes take two operands: the normal LHS and RHS to the add. They
318 /// produce two results: the normal result of the add, and a boolean that
/// indicates if an overflow occurred (*not* a flag, because it may be stored
/// to memory, etc.). If the type of the boolean is not i1 then the high
321 /// bits conform to getBooleanContents.
322 /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
323 SADDO,
324 UADDO,
325
326 /// Same for subtraction.
327 SSUBO,
328 USUBO,
329
330 /// Same for multiplication.
331 SMULO,
332 UMULO,
333
334 /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
335 /// integers with the same bit width (W). If the true value of LHS + RHS
336 /// exceeds the largest value that can be represented by W bits, the
337 /// resulting value is this maximum value. Otherwise, if this value is less
338 /// than the smallest value that can be represented by W bits, the
339 /// resulting value is this minimum value.
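/// For example (illustrative only), for i8 operands:
///   SADDSAT(100, 100) = 127   (saturates at the signed maximum)
///   UADDSAT(200, 100) = 255   (saturates at the unsigned maximum)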
340 SADDSAT,
341 UADDSAT,
342
343 /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
344 /// integers with the same bit width (W). If the true value of LHS - RHS
345 /// exceeds the largest value that can be represented by W bits, the
346 /// resulting value is this maximum value. Otherwise, if this value is less
347 /// than the smallest value that can be represented by W bits, the
348 /// resulting value is this minimum value.
349 SSUBSAT,
350 USUBSAT,
351
352 /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
353 /// operand is the value to be shifted, and the second argument is the amount
354 /// to shift by. Both must be integers of the same bit width (W). If the true
355 /// value of LHS << RHS exceeds the largest value that can be represented by
/// W bits, the resulting value is this maximum value. Otherwise, if this
357 /// value is less than the smallest value that can be represented by W bits,
358 /// the resulting value is this minimum value.
359 SSHLSAT,
360 USHLSAT,
361
362 /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
363 /// on 2 integers with the same width and scale. SCALE represents the scale
364 /// of both operands as fixed point numbers. This SCALE parameter must be a
365 /// constant integer. A scale of zero is effectively performing
366 /// multiplication on 2 integers.
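/// For example (illustrative only), with SCALE = 1 each operand has one
/// fractional bit, so i8 SMULFIX(6, 4, 1) computes 3.0 * 2.0 = 6.0, encoded
/// as 12 (i.e. (6 * 4) >> 1).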
367 SMULFIX,
368 UMULFIX,
369
370 /// Same as the corresponding unsaturated fixed point instructions, but the
371 /// result is clamped between the min and max values representable by the
372 /// bits of the first 2 operands.
373 SMULFIXSAT,
374 UMULFIXSAT,
375
376 /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
377 /// 2 integers with the same width and scale. SCALE represents the scale
378 /// of both operands as fixed point numbers. This SCALE parameter must be a
379 /// constant integer.
380 SDIVFIX,
381 UDIVFIX,
382
383 /// Same as the corresponding unsaturated fixed point instructions, but the
384 /// result is clamped between the min and max values representable by the
385 /// bits of the first 2 operands.
386 SDIVFIXSAT,
387 UDIVFIXSAT,
388
389 /// Simple binary floating point operators.
390 FADD,
391 FSUB,
392 FMUL,
393 FDIV,
394 FREM,
395
396 /// Constrained versions of the binary floating point operators.
397 /// These will be lowered to the simple operators before final selection.
398 /// They are used to limit optimizations while the DAG is being
399 /// optimized.
400 STRICT_FADD,
401 STRICT_FSUB,
402 STRICT_FMUL,
403 STRICT_FDIV,
404 STRICT_FREM,
405 STRICT_FMA,
406
407 /// Constrained versions of libm-equivalent floating point intrinsics.
408 /// These will be lowered to the equivalent non-constrained pseudo-op
409 /// (or expanded to the equivalent library call) before final selection.
410 /// They are used to limit optimizations while the DAG is being optimized.
411 STRICT_FSQRT,
412 STRICT_FPOW,
413 STRICT_FPOWI,
414 STRICT_FSIN,
415 STRICT_FCOS,
416 STRICT_FEXP,
417 STRICT_FEXP2,
418 STRICT_FLOG,
419 STRICT_FLOG10,
420 STRICT_FLOG2,
421 STRICT_FRINT,
422 STRICT_FNEARBYINT,
423 STRICT_FMAXNUM,
424 STRICT_FMINNUM,
425 STRICT_FCEIL,
426 STRICT_FFLOOR,
427 STRICT_FROUND,
428 STRICT_FROUNDEVEN,
429 STRICT_FTRUNC,
430 STRICT_LROUND,
431 STRICT_LLROUND,
432 STRICT_LRINT,
433 STRICT_LLRINT,
434 STRICT_FMAXIMUM,
435 STRICT_FMINIMUM,
436
437 /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
438 /// unsigned integer. These have the same semantics as fptosi and fptoui
439 /// in IR.
440 /// They are used to limit optimizations while the DAG is being optimized.
441 STRICT_FP_TO_SINT,
442 STRICT_FP_TO_UINT,
443
444 /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
445 /// a floating point value. These have the same semantics as sitofp and
446 /// uitofp in IR.
447 /// They are used to limit optimizations while the DAG is being optimized.
448 STRICT_SINT_TO_FP,
449 STRICT_UINT_TO_FP,
450
451 /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
452 /// point type down to the precision of the destination VT. TRUNC is a
453 /// flag, which is always an integer that is zero or one. If TRUNC is 0,
454 /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
455 /// change the value of Y.
456 ///
457 /// The TRUNC = 1 case is used in cases where we know that the value will
458 /// not be modified by the node, because Y is not using any of the extra
459 /// precision of source type. This allows certain transformations like
460 /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
461 /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
462 /// removed.
463 /// It is used to limit optimizations while the DAG is being optimized.
464 STRICT_FP_ROUND,
465
466 /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
467 /// type.
468 /// It is used to limit optimizations while the DAG is being optimized.
469 STRICT_FP_EXTEND,
470
471 /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
472 /// for floating-point operands only. STRICT_FSETCC performs a quiet
473 /// comparison operation, while STRICT_FSETCCS performs a signaling
474 /// comparison operation.
475 STRICT_FSETCC,
476 STRICT_FSETCCS,
477
478 // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
479 FPTRUNC_ROUND,
480
481 /// FMA - Perform a * b + c with no intermediate rounding step.
482 FMA,
483
484 /// FMAD - Perform a * b + c, while getting the same result as the
485 /// separately rounded operations.
486 FMAD,
487
488 /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
489 /// DAG node does not require that X and Y have the same type, just that
490 /// they are both floating point. X and the result must have the same type.
491 /// FCOPYSIGN(f32, f64) is allowed.
492 FCOPYSIGN,
493
494 /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
495 /// value as an integer 0/1 value.
496 FGETSIGN,
497
498 /// Returns platform specific canonical encoding of a floating point number.
499 FCANONICALIZE,
500
/// Performs a check of a floating-point class property, as defined by
/// IEEE-754. The first operand is the floating-point value to check. The
/// second operand specifies the checked property and is a TargetConstant
/// which specifies the test in the same way as the 'is_fpclass' intrinsic.
/// Returns a boolean value.
506 IS_FPCLASS,
507
508 /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
509 /// with the specified, possibly variable, elements. The types of the
510 /// operands must match the vector element type, except that integer types
511 /// are allowed to be larger than the element type, in which case the
512 /// operands are implicitly truncated. The types of the operands must all
513 /// be the same.
514 BUILD_VECTOR,
515
516 /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
517 /// at IDX replaced with VAL. If the type of VAL is larger than the vector
518 /// element type then VAL is truncated before replacement.
519 ///
520 /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
521 /// vector width. IDX is not first scaled by the runtime scaling factor of
522 /// VECTOR.
523 INSERT_VECTOR_ELT,
524
525 /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
526 /// identified by the (potentially variable) element number IDX. If the return
527 /// type is an integer type larger than the element type of the vector, the
528 /// result is extended to the width of the return type. In that case, the high
529 /// bits are undefined.
530 ///
531 /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
532 /// vector width. IDX is not first scaled by the runtime scaling factor of
533 /// VECTOR.
534 EXTRACT_VECTOR_ELT,
535
536 /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
537 /// vector type with the same length and element type, this produces a
538 /// concatenated vector result value, with length equal to the sum of the
539 /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
540 /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
541 /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
542 CONCAT_VECTORS,
543
/// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
/// inserted into VECTOR1. Let the type of VECTOR2 be T. IDX represents the
/// starting element number at which VECTOR2 will be inserted and must be a
/// constant multiple of T's known minimum vector length. If T is a scalable
/// vector, IDX is first scaled by the runtime scaling factor of T. The
/// elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
/// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
/// indices. If this condition cannot be determined statically but is false
/// at runtime, then the result vector is undefined. The IDX parameter must
/// be a vector index constant type, which for most targets will be an
/// integer pointer type.
555 ///
556 /// This operation supports inserting a fixed-width vector into a scalable
557 /// vector, but not the other way around.
558 INSERT_SUBVECTOR,
559
560 /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
561 /// Let the result type be T, then IDX represents the starting element number
562 /// from which a subvector of type T is extracted. IDX must be a constant
563 /// multiple of T's known minimum vector length. If T is a scalable vector,
564 /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
565 /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
566 /// condition cannot be determined statically but is false at runtime, then
567 /// the result vector is undefined. The IDX parameter must be a vector index
568 /// constant type, which for most targets will be an integer pointer type.
569 ///
570 /// This operation supports extracting a fixed-width vector from a scalable
571 /// vector, but not the other way around.
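/// For example (illustrative only): (v2i32 extract_subvector (v4i32 X), 2)
/// returns elements 2 and 3 of X. For a scalable result type such as
/// <vscale x 2 x i32>, an IDX of 2 selects source elements 2*vscale through
/// 4*vscale-1.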
572 EXTRACT_SUBVECTOR,
573
574 /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
575 /// whose elements are shuffled using the following algorithm:
576 /// RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
577 VECTOR_REVERSE,
578
579 /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
580 /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
581 /// values that indicate which value (or undef) each result element will
582 /// get. These constant ints are accessible through the
583 /// ShuffleVectorSDNode class. This is quite similar to the Altivec
584 /// 'vperm' instruction, except that the indices must be constants and are
585 /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
586 VECTOR_SHUFFLE,
587
588 /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
589 /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
590 /// Let the result type be T, if IMM is positive it represents the starting
591 /// element number (an index) from which a subvector of type T is extracted
592 /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
593 /// specifying the number of trailing elements to extract from VEC1, where the
594 /// elements of T are selected using the following algorithm:
595 /// RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
596 /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
597 /// is a constant integer.
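/// For example (illustrative only), with VEC1 = <A, B, C, D> and
/// VEC2 = <E, F, G, H>:
///   VECTOR_SPLICE(VEC1, VEC2, 1)  = <B, C, D, E>
///   VECTOR_SPLICE(VEC1, VEC2, -1) = <D, E, F, G>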
598 VECTOR_SPLICE,
599
600 /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
601 /// scalar value into element 0 of the resultant vector type. The top
602 /// elements 1 to N-1 of the N-element vector are undefined. The type
603 /// of the operand must match the vector element type, except when they
604 /// are integer types. In this case the operand is allowed to be wider
605 /// than the vector element type, and is implicitly truncated to it.
606 SCALAR_TO_VECTOR,
607
608 /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
609 /// duplicated in all lanes. The type of the operand must match the vector
610 /// element type, except when they are integer types. In this case the
611 /// operand is allowed to be wider than the vector element type, and is
612 /// implicitly truncated to it.
613 SPLAT_VECTOR,
614
615 /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
616 /// scalar values joined together and then duplicated in all lanes. This
617 /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
618 /// allows representing a 64-bit splat on a target with 32-bit integers. The
619 /// total width of the scalars must cover the element width. SCALAR1 contains
620 /// the least significant bits of the value regardless of endianness and all
621 /// scalars should have the same type.
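/// For example (illustrative only), on a target with 32-bit scalars, a splat
/// of the 64-bit value 0x1111222233334444 can be expressed as
/// SPLAT_VECTOR_PARTS(i32 0x33334444, i32 0x11112222), least significant
/// part first.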
622 SPLAT_VECTOR_PARTS,
623
624 /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
625 /// of a linear sequence of unsigned values starting from 0 with a step of
626 /// IMM, where IMM must be a TargetConstant with type equal to the vector
627 /// element type. The arithmetic is performed modulo the bitwidth of the
628 /// element.
629 ///
630 /// The operation does not support returning fixed-width vectors or
631 /// non-constant operands.
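/// For example (illustrative only): (nxv4i32 step_vector (i32 3)) yields the
/// sequence <0, 3, 6, 9, ...> across all lanes of the scalable vector.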
632 STEP_VECTOR,
633
634 /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
635 /// producing an unsigned/signed value of type i[2*N], then return the top
636 /// part.
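/// For example (illustrative only), for i32 operands A and B:
///   MULHS(A, B) = (sext(A) * sext(B))[63:32]
///   MULHU(A, B) = (zext(A) * zext(B))[63:32]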
637 MULHU,
638 MULHS,
639
640 /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
641 /// type i[N+1], halving the result by shifting it one bit right.
642 /// shr(add(ext(X), ext(Y)), 1)
643 AVGFLOORS,
644 AVGFLOORU,
645 /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
646 /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
647 /// right. shr(add(ext(X), ext(Y), 1), 1)
648 AVGCEILS,
649 AVGCEILU,
650
651 // ABDS/ABDU - Absolute difference - Return the absolute difference between
652 // two numbers interpreted as signed/unsigned.
// i.e. trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
654 // or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
655 ABDS,
656 ABDU,
657
658 /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
659 /// integers.
660 SMIN,
661 SMAX,
662 UMIN,
663 UMAX,
664
665 /// Bitwise operators - logical and, logical or, logical xor.
666 AND,
667 OR,
668 XOR,
669
670 /// ABS - Determine the unsigned absolute value of a signed integer value of
671 /// the same bitwidth.
672 /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
673 /// is performed.
674 ABS,
675
676 /// Shift and rotation operations. After legalization, the type of the
677 /// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
678 /// the shift amount can be any type, but care must be taken to ensure it is
679 /// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before
680 /// legalization, types like i1024 can occur and i8 doesn't have enough bits
681 /// to represent the shift amount.
/// When the 1st operand is a vector, the shift amount must be of the same
683 /// type. (TLI.getShiftAmountTy() will return the same type when the input
684 /// type is a vector.)
685 /// For rotates and funnel shifts, the shift amount is treated as an unsigned
686 /// amount modulo the element size of the first operand.
687 ///
688 /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
689 /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
690 /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
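/// For example (illustrative only), for i8 values:
///   fshl(0xAB, 0xCD, 4) = 0xBC   (the high 8 bits of the 16-bit
///                                 concatenation 0xABCD shifted left by 4)
///   rotl(X, C) is equivalent to fshl(X, X, C)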
691 SHL,
692 SRA,
693 SRL,
694 ROTL,
695 ROTR,
696 FSHL,
697 FSHR,
698
699 /// Byte Swap and Counting operators.
700 BSWAP,
701 CTTZ,
702 CTLZ,
703 CTPOP,
704 BITREVERSE,
705 PARITY,
706
707 /// Bit counting operators with an undefined result for zero inputs.
708 CTTZ_ZERO_UNDEF,
709 CTLZ_ZERO_UNDEF,
710
711 /// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
712 /// i1 then the high bits must conform to getBooleanContents.
713 SELECT,
714
715 /// Select with a vector condition (op #0) and two vector operands (ops #1
716 /// and #2), returning a vector result. All vectors have the same length.
717 /// Much like the scalar select and setcc, each bit in the condition selects
718 /// whether the corresponding result element is taken from op #1 or op #2.
719 /// At first, the VSELECT condition is of vXi1 type. Later, targets may
720 /// change the condition type in order to match the VSELECT node using a
721 /// pattern. The condition follows the BooleanContent format of the target.
722 VSELECT,
723
724 /// Select with condition operator - This selects between a true value and
725 /// a false value (ops #2 and #3) based on the boolean result of comparing
726 /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
727 /// condition code in op #4, a CondCodeSDNode.
728 SELECT_CC,
729
730 /// SetCC operator - This evaluates to a true value iff the condition is
731 /// true. If the result value type is not i1 then the high bits conform
732 /// to getBooleanContents. The operands to this are the left and right
733 /// operands to compare (ops #0, and #1) and the condition code to compare
734 /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
735 /// then the result type must also be a vector type.
736 SETCC,
737
738 /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
739 /// op #2 is a boolean indicating if there is an incoming carry. This
740 /// operator checks the result of "LHS - RHS - Carry", and can be used to
741 /// compare two wide integers:
742 /// (setcccarry lhshi rhshi (subcarry lhslo rhslo) cc).
743 /// Only valid for integers.
744 SETCCCARRY,
745
746 /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
747 /// integer shift operations. The operation ordering is:
748 /// [Lo,Hi] = op [LoLHS,HiLHS], Amt
749 SHL_PARTS,
750 SRA_PARTS,
751 SRL_PARTS,
752
753 /// Conversion operators. These are all single input single output
754 /// operations. For all of these, the result type must be strictly
755 /// wider or narrower (depending on the operation) than the source
756 /// type.
757
758 /// SIGN_EXTEND - Used for integer types, replicating the sign bit
759 /// into new bits.
760 SIGN_EXTEND,
761
762 /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
763 ZERO_EXTEND,
764
765 /// ANY_EXTEND - Used for integer types. The high bits are undefined.
766 ANY_EXTEND,
767
768 /// TRUNCATE - Completely drop the high bits.
769 TRUNCATE,
770
771 /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
772 /// depends on the first letter) to floating point.
773 SINT_TO_FP,
774 UINT_TO_FP,
775
776 /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
777 /// sign extend a small value in a large integer register (e.g. sign
778 /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
/// with bit 7 of the source). The size of the smaller type is indicated by
/// operand #1, a ValueType node.
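/// For example (illustrative only): (sign_extend_inreg (i32 X), ValueType:i8)
/// is equivalent to (sra (shl X, 24), 24), replicating bit 7 of X into bits
/// 8..31.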
781 SIGN_EXTEND_INREG,
782
783 /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
784 /// in-register any-extension of the low lanes of an integer vector. The
785 /// result type must have fewer elements than the operand type, and those
786 /// elements must be larger integer types such that the total size of the
787 /// operand type is less than or equal to the size of the result type. Each
788 /// of the low operand elements is any-extended into the corresponding,
789 /// wider result elements with the high bits becoming undef.
790 /// NOTE: The type legalizer prefers to make the operand and result size
791 /// the same to allow expansion to shuffle vector during op legalization.
792 ANY_EXTEND_VECTOR_INREG,
793
794 /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
795 /// in-register sign-extension of the low lanes of an integer vector. The
796 /// result type must have fewer elements than the operand type, and those
797 /// elements must be larger integer types such that the total size of the
798 /// operand type is less than or equal to the size of the result type. Each
799 /// of the low operand elements is sign-extended into the corresponding,
800 /// wider result elements.
801 /// NOTE: The type legalizer prefers to make the operand and result size
802 /// the same to allow expansion to shuffle vector during op legalization.
803 SIGN_EXTEND_VECTOR_INREG,
804
805 /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
806 /// in-register zero-extension of the low lanes of an integer vector. The
807 /// result type must have fewer elements than the operand type, and those
808 /// elements must be larger integer types such that the total size of the
809 /// operand type is less than or equal to the size of the result type. Each
810 /// of the low operand elements is zero-extended into the corresponding,
811 /// wider result elements.
812 /// NOTE: The type legalizer prefers to make the operand and result size
813 /// the same to allow expansion to shuffle vector during op legalization.
814 ZERO_EXTEND_VECTOR_INREG,
815
816 /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
817 /// integer. These have the same semantics as fptosi and fptoui in IR. If
818 /// the FP value cannot fit in the integer type, the results are undefined.
819 FP_TO_SINT,
820 FP_TO_UINT,
821
822 /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
823 /// signed or unsigned scalar integer type given in operand 1 with the
824 /// following semantics:
825 ///
826 /// * If the value is NaN, zero is returned.
827 /// * If the value is larger/smaller than the largest/smallest integer,
828 /// the largest/smallest integer is returned (saturation).
829 /// * Otherwise the result of rounding the value towards zero is returned.
830 ///
831 /// The scalar width of the type given in operand 1 must be equal to, or
832 /// smaller than, the scalar result type width. It may end up being smaller
833 /// than the result width as a result of integer type legalization.
834 ///
835 /// After converting to the scalar integer type in operand 1, the value is
836 /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
837 /// zero extends.
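/// For example (illustrative only), with an i8 type in operand 1:
///   FP_TO_SINT_SAT(300.0) = 127
///   FP_TO_SINT_SAT(-1.5)  = -1    (rounded towards zero)
///   FP_TO_UINT_SAT(-1.0)  = 0
///   FP_TO_SINT_SAT(NaN)   = 0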
838 FP_TO_SINT_SAT,
839 FP_TO_UINT_SAT,
840
841 /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
842 /// down to the precision of the destination VT. TRUNC is a flag, which is
843 /// always an integer that is zero or one. If TRUNC is 0, this is a
844 /// normal rounding, if it is 1, this FP_ROUND is known to not change the
845 /// value of Y.
846 ///
847 /// The TRUNC = 1 case is used in cases where we know that the value will
848 /// not be modified by the node, because Y is not using any of the extra
849 /// precision of source type. This allows certain transformations like
850 /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
851 /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
852 FP_ROUND,
853
854 /// Returns current rounding mode:
855 /// -1 Undefined
856 /// 0 Round to 0
857 /// 1 Round to nearest, ties to even
858 /// 2 Round to +inf
859 /// 3 Round to -inf
860 /// 4 Round to nearest, ties to zero
861 /// Result is rounding mode and chain. Input is a chain.
862 GET_ROUNDING,
863
864 /// Set rounding mode.
865 /// The first operand is a chain pointer. The second specifies the required
866 /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
867 SET_ROUNDING,
868
869 /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
870 FP_EXTEND,
871
872 /// BITCAST - This operator converts between integer, vector and FP
873 /// values, as if the value was stored to memory with one type and loaded
874 /// from the same address with the other type (or equivalently for vector
875 /// format conversions, etc). The source and result are required to have
876 /// the same bit size (e.g. f32 <-> i32). This can also be used for
877 /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
878 /// getNode().
879 ///
880 /// This operator is subtly different from the bitcast instruction from
881 /// LLVM-IR since this node may change the bits in the register. For
882 /// example, this occurs on big-endian NEON and big-endian MSA where the
883 /// layout of the bits in the register depends on the vector type and this
884 /// operator acts as a shuffle operation for some vector type combinations.
885 BITCAST,
886
887 /// ADDRSPACECAST - This operator converts between pointers of different
888 /// address spaces.
889 ADDRSPACECAST,
890
891 /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
/// and truncation for half-precision (16-bit) floating-point numbers. These
/// nodes
893 /// form a semi-softened interface for dealing with f16 (as an i16), which
894 /// is often a storage-only type but has native conversions.
895 FP16_TO_FP,
896 FP_TO_FP16,
897 STRICT_FP16_TO_FP,
898 STRICT_FP_TO_FP16,
899
900 /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions
901 /// and truncation for bfloat16. These nodes form a semi-softened interface
902 /// for dealing with bf16 (as an i16), which is often a storage-only type but
903 /// has native conversions.
904 BF16_TO_FP,
905 FP_TO_BF16,
906
907 /// Perform various unary floating-point operations inspired by libm. For
/// FPOWI, the result is undefined if the integer operand doesn't fit into
909 /// sizeof(int).
910 FNEG,
911 FABS,
912 FSQRT,
913 FCBRT,
914 FSIN,
915 FCOS,
916 FPOWI,
917 FPOW,
918 FLOG,
919 FLOG2,
920 FLOG10,
921 FEXP,
922 FEXP2,
923 FCEIL,
924 FTRUNC,
925 FRINT,
926 FNEARBYINT,
927 FROUND,
928 FROUNDEVEN,
929 FFLOOR,
930 LROUND,
931 LLROUND,
932 LRINT,
933 LLRINT,
934
935 /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
936 /// values.
///
938 /// In the case where a single input is a NaN (either signaling or quiet),
939 /// the non-NaN input is returned.
940 ///
941 /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
942 FMINNUM,
943 FMAXNUM,
944
945 /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
946 /// two values, following the IEEE-754 2008 definition. This differs from
947 /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
948 /// signaling NaN, returns a quiet NaN.
949 FMINNUM_IEEE,
950 FMAXNUM_IEEE,
951
952 /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
953 /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
954 /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
955 FMINIMUM,
956 FMAXIMUM,
957
958 /// FSINCOS - Compute both fsin and fcos as a single operation.
959 FSINCOS,
960
961 /// LOAD and STORE have token chains as their first operand, then the same
962 /// operands as an LLVM load/store instruction, then an offset node that
963 /// is added / subtracted from the base pointer to form the address (for
964 /// indexed memory ops).
965 LOAD,
966 STORE,
967
968 /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
969 /// to a specified boundary. This node always has two return values: a new
970 /// stack pointer value and a chain. The first operand is the token chain,
971 /// the second is the number of bytes to allocate, and the third is the
972 /// alignment boundary. The size is guaranteed to be a multiple of the
973 /// stack alignment, and the alignment is guaranteed to be bigger than the
974 /// stack alignment (if required) or 0 to get standard stack alignment.
975 DYNAMIC_STACKALLOC,
976
977 /// Control flow instructions. These all have token chains.
978
979 /// BR - Unconditional branch. The first operand is the chain
980 /// operand, the second is the MBB to branch to.
981 BR,
982
983 /// BRIND - Indirect branch. The first operand is the chain, the second
984 /// is the value to branch to, which must be of the same type as the
985 /// target's pointer type.
986 BRIND,
987
988 /// BR_JT - Jumptable branch. The first operand is the chain, the second
989 /// is the jumptable index, the last one is the jumptable entry index.
990 BR_JT,
991
992 /// BRCOND - Conditional branch. The first operand is the chain, the
993 /// second is the condition, the third is the block to branch to if the
994 /// condition is true. If the type of the condition is not i1, then the
995 /// high bits must conform to getBooleanContents. If the condition is undef,
996 /// it nondeterministically jumps to the block.
997 /// TODO: Its semantics w.r.t undef requires further discussion; we need to
/// make sure that it is consistent with optimizations in MIR & the
999 /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
1000 BRCOND,
1001
1002 /// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
/// that the condition is represented as a condition code, and two nodes to
1004 /// compare, rather than as a combined SetCC node. The operands in order
1005 /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
1006 /// condition is undef, it nondeterministically jumps to the block.
1007 BR_CC,
1008
1009 /// INLINEASM - Represents an inline asm block. This node always has two
1010 /// return values: a chain and a flag result. The inputs are as follows:
1011 /// Operand #0 : Input chain.
1012 /// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
1013 /// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
1014 /// Operand #3 : HasSideEffect, IsAlignStack bits.
1015 /// After this, it is followed by a list of operands with this format:
1016 /// ConstantSDNode: Flags that encode whether it is a mem or not, the
/// number of operands that follow, etc. See InlineAsm.h.
1018 /// ... however many operands ...
1019 /// Operand #last: Optional, an incoming flag.
1020 ///
1021 /// The variable width operands are required to represent target addressing
1022 /// modes as a single "operand", even though they may have multiple
1023 /// SDOperands.
1024 INLINEASM,
1025
1026 /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
1027 INLINEASM_BR,
1028
1029 /// EH_LABEL - Represents a label in mid basic block used to track
1030 /// locations needed for debug and exception handling tables. These nodes
1031 /// take a chain as input and return a chain.
1032 EH_LABEL,
1033
1034 /// ANNOTATION_LABEL - Represents a mid basic block label used by
1035 /// annotations. This should remain within the basic block and be ordered
1036 /// with respect to other call instructions, but loads and stores may float
1037 /// past it.
1038 ANNOTATION_LABEL,
1039
1040 /// CATCHRET - Represents a return from a catch block funclet. Used for
1041 /// MSVC compatible exception handling. Takes a chain operand and a
1042 /// destination basic block operand.
1043 CATCHRET,
1044
1045 /// CLEANUPRET - Represents a return from a cleanup block funclet. Used for
1046 /// MSVC compatible exception handling. Takes only a chain operand.
1047 CLEANUPRET,
1048
1049 /// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
1050 /// value, the same type as the pointer type for the system, and an output
1051 /// chain.
1052 STACKSAVE,
1053
/// STACKRESTORE has two operands, an input chain and a pointer to restore
/// to. It returns an output chain.
1056 STACKRESTORE,
1057
1058 /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
1059 /// of a call sequence, and carry arbitrary information that target might
1060 /// want to know. The first operand is a chain, the rest are specified by
1061 /// the target and not touched by the DAG optimizers.
1062 /// Targets that may use stack to pass call arguments define additional
1063 /// operands:
1064 /// - size of the call frame part that must be set up within the
1065 /// CALLSEQ_START..CALLSEQ_END pair,
1066 /// - part of the call frame prepared prior to CALLSEQ_START.
1067 /// Both these parameters must be constants, their sum is the total call
1068 /// frame size.
1069 /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
1070 CALLSEQ_START, // Beginning of a call sequence
1071 CALLSEQ_END, // End of a call sequence
1072
1073 /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
1074 /// and the alignment. It returns a pair of values: the vaarg value and a
1075 /// new chain.
1076 VAARG,
1077
1078 /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
1079 /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
1080 /// source.
1081 VACOPY,
1082
/// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
/// a pointer, and a SRCVALUE.
1085 VAEND,
1086 VASTART,
1087
1088 // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
1089 // with the preallocated call Value.
1090 PREALLOCATED_SETUP,
1091 // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
1092 // with the preallocated call Value, and a constant int.
1093 PREALLOCATED_ARG,
1094
1095 /// SRCVALUE - This is a node type that holds a Value* that is used to
1096 /// make reference to a value in the LLVM IR.
1097 SRCVALUE,
1098
/// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
1100 /// reference metadata in the IR.
1101 MDNODE_SDNODE,
1102
1103 /// PCMARKER - This corresponds to the pcmarker intrinsic.
1104 PCMARKER,
1105
1106 /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
1107 /// It produces a chain and one i64 value. The only operand is a chain.
1108 /// If i64 is not legal, the result will be expanded into smaller values.
1109 /// Still, it returns an i64, so targets should set legality for i64.
1110 /// The result is the content of the architecture-specific cycle
1111 /// counter-like register (or other high accuracy low latency clock source).
1112 READCYCLECOUNTER,
1113
1114 /// HANDLENODE node - Used as a handle for various purposes.
1115 HANDLENODE,
1116
1117 /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
1118 /// takes as input a token chain, the pointer to the trampoline, the pointer
1119 /// to the nested function, the pointer to pass for the 'nest' parameter, a
1120 /// SRCVALUE for the trampoline and another for the nested function
1121 /// (allowing targets to access the original Function*).
1122 /// It produces a token chain as output.
1123 INIT_TRAMPOLINE,
1124
1125 /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
1126 /// It takes a pointer to the trampoline and produces a (possibly) new
1127 /// pointer to the same trampoline with platform-specific adjustments
1128 /// applied. The pointer it returns points to an executable block of code.
1129 ADJUST_TRAMPOLINE,
1130
1131 /// TRAP - Trapping instruction
1132 TRAP,
1133
1134 /// DEBUGTRAP - Trap intended to get the attention of a debugger.
1135 DEBUGTRAP,
1136
1137 /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
1138 /// failure.
1139 UBSANTRAP,
1140
1141 /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
1142 /// is the chain. The other operands are the address to prefetch,
1143 /// read / write specifier, locality specifier and instruction / data cache
1144 /// specifier.
1145 PREFETCH,
1146
/// ARITH_FENCE - This corresponds to an arithmetic fence intrinsic. Both its
/// operand and output have the same floating-point type.
1149 ARITH_FENCE,
1150
1151 /// MEMBARRIER - Compiler barrier only; generate a no-op.
1152 MEMBARRIER,
1153
1154 /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
1155 /// This corresponds to the fence instruction. It takes an input chain, and
1156 /// two integer constants: an AtomicOrdering and a SynchronizationScope.
1157 ATOMIC_FENCE,
1158
1159 /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
1160 /// This corresponds to "load atomic" instruction.
1161 ATOMIC_LOAD,
1162
1163 /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
1164 /// This corresponds to "store atomic" instruction.
1165 ATOMIC_STORE,
1166
1167 /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
1168 /// For double-word atomic operations:
1169 /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
1170 /// swapLo, swapHi)
1171 /// This corresponds to the cmpxchg instruction.
1172 ATOMIC_CMP_SWAP,
1173
1174 /// Val, Success, OUTCHAIN
1175 /// = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
1176 /// N.b. this is still a strong cmpxchg operation, so
1177 /// Success == "Val == cmp".
1178 ATOMIC_CMP_SWAP_WITH_SUCCESS,
1179
1180 /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
1181 /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
1182 /// For double-word atomic operations:
1183 /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
1184 /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
1185 /// These correspond to the atomicrmw instruction.
1186 ATOMIC_SWAP,
1187 ATOMIC_LOAD_ADD,
1188 ATOMIC_LOAD_SUB,
1189 ATOMIC_LOAD_AND,
1190 ATOMIC_LOAD_CLR,
1191 ATOMIC_LOAD_OR,
1192 ATOMIC_LOAD_XOR,
1193 ATOMIC_LOAD_NAND,
1194 ATOMIC_LOAD_MIN,
1195 ATOMIC_LOAD_MAX,
1196 ATOMIC_LOAD_UMIN,
1197 ATOMIC_LOAD_UMAX,
1198 ATOMIC_LOAD_FADD,
1199 ATOMIC_LOAD_FSUB,
1200 ATOMIC_LOAD_FMAX,
1201 ATOMIC_LOAD_FMIN,
1202 ATOMIC_LOAD_UINC_WRAP,
1203 ATOMIC_LOAD_UDEC_WRAP,
1204
1205 // Masked load and store - consecutive vector load and store operations
1206 // with additional mask operand that prevents memory accesses to the
1207 // masked-off lanes.
1208 //
1209 // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
1210 // OutChain = MSTORE(Value, BasePtr, Mask)
1211 MLOAD,
1212 MSTORE,
1213
1214 // Masked gather and scatter - load and store operations for a vector of
1215 // random addresses with additional mask operand that prevents memory
1216 // accesses to the masked-off lanes.
1217 //
1218 // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
1219 // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
1220 //
1221 // The Index operand can have more vector elements than the other operands
1222 // due to type legalization. The extra elements are ignored.
1223 MGATHER,
1224 MSCATTER,
1225
1226 /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
1227 /// is the chain and the second operand is the alloca pointer.
1228 LIFETIME_START,
1229 LIFETIME_END,
1230
1231 /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
1232 /// beginning and end of GC transition sequence, and carry arbitrary
1233 /// information that target might need for lowering. The first operand is
1234 /// a chain, the rest are specified by the target and not touched by the DAG
1235 /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
1236 /// nested.
1237 GC_TRANSITION_START,
1238 GC_TRANSITION_END,
1239
1240 /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
1241 /// the most recent dynamic alloca. For most targets that would be 0, but
/// for some others (e.g. PowerPC, PowerPC64) it would be a compile-time
/// known nonzero constant. The only operand here is the chain.
1244 GET_DYNAMIC_AREA_OFFSET,
1245
/// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
1247 /// the sample counts quality.
1248 PSEUDO_PROBE,
1249
1250 /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
1251 /// number of elements within a scalable vector. IMM is a constant integer
1252 /// multiplier that is applied to the runtime value.
1253 VSCALE,
1254
1255 /// Generic reduction nodes. These nodes represent horizontal vector
1256 /// reduction operations, producing a scalar result.
1257 /// The SEQ variants perform reductions in sequential order. The first
1258 /// operand is an initial scalar accumulator value, and the second operand
1259 /// is the vector to reduce.
1260 /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
1261 /// ... is equivalent to
1262 /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
1263 VECREDUCE_SEQ_FADD,
1264 VECREDUCE_SEQ_FMUL,
1265
1266 /// These reductions have relaxed evaluation order semantics, and have a
1267 /// single vector operand. The order of evaluation is unspecified. For
1268 /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
1269 /// reduction, i.e.:
1270 /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
1271 /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
1272 /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
1273 /// RES = FADD PART_RDX2[0], PART_RDX2[1]
1274 /// For non-pow-2 vectors, this can be computed by extracting each element
1275 /// and performing the operation as if it were scalarized.
1276 VECREDUCE_FADD,
1277 VECREDUCE_FMUL,
1278 /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
1279 VECREDUCE_FMAX,
1280 VECREDUCE_FMIN,
1281 /// Integer reductions may have a result type larger than the vector element
1282 /// type. However, the reduction is performed using the vector element type
1283 /// and the value in the top bits is unspecified.
1284 VECREDUCE_ADD,
1285 VECREDUCE_MUL,
1286 VECREDUCE_AND,
1287 VECREDUCE_OR,
1288 VECREDUCE_XOR,
1289 VECREDUCE_SMAX,
1290 VECREDUCE_SMIN,
1291 VECREDUCE_UMAX,
1292 VECREDUCE_UMIN,
1293
1294 // The `llvm.experimental.stackmap` intrinsic.
1295 // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
1296 // Outputs: output chain, glue
1297 STACKMAP,
1298
1299 // The `llvm.experimental.patchpoint.*` intrinsic.
1300 // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee,
1301 // <numArgs>, cc, ...
1302 // Outputs: [rv], output chain, glue
1303 PATCHPOINT,
1304
1305 // Vector Predication
1306 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
1307 #include "llvm/IR/VPIntrinsics.def"
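// The .def file above expands to one enumerator per vector-predicated node
// (for example ISD::VP_ADD, ISD::VP_FADD, ISD::VP_LOAD). In addition to its
// ordinary operands, each VP node carries a vector mask and an explicit
// vector length (EVL) operand; see the getVPMaskIdx and
// getVPExplicitVectorLengthIdx helpers below.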
1308
1309 /// BUILTIN_OP_END - This must be the last enum value in this list.
1310 /// The target-specific pre-isel opcode values start here.
1311 BUILTIN_OP_END
1312 };
1313
1314 /// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
1315 /// which cannot raise FP exceptions should be less than this value.
1316 /// Those that do must not be less than this value.
1317 static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;
1318
1319 /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
1320 /// which do not reference a specific memory location should be less than
1321 /// this value. Those that do must not be less than this value, and can
1322 /// be used with SelectionDAG::getMemIntrinsicNode.
1323 static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
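// A sketch of how a target is expected to number its own opcodes relative to
// these boundaries (the MYTARGETISD namespace and opcode names below are
// purely illustrative):
//   namespace MYTARGETISD {
//   enum NodeType : unsigned {
//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
//     SIMPLE_OP,                                      // no FP exceptions, no memory access
//     STRICT_OP = ISD::FIRST_TARGET_STRICTFP_OPCODE,  // may raise an FP exception
//     MEMORY_OP = ISD::FIRST_TARGET_MEMORY_OPCODE,    // usable with getMemIntrinsicNode
//   };
//   } // namespace MYTARGETISD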
1324
1325 /// Whether this is a bitwise logic opcode.
1326 inline bool isBitwiseLogicOp(unsigned Opcode) {
1327 return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR;
1328 }
1329
1330 /// Get underlying scalar opcode for VECREDUCE opcode.
1331 /// For example ISD::AND for ISD::VECREDUCE_AND.
1332 NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
1333
1334 /// Whether this is a vector-predicated Opcode.
1335 bool isVPOpcode(unsigned Opcode);
1336
1337 /// Whether this is a vector-predicated binary operation opcode.
1338 bool isVPBinaryOp(unsigned Opcode);
1339
1340 /// Whether this is a vector-predicated reduction opcode.
1341 bool isVPReduction(unsigned Opcode);
1342
1343 /// The operand position of the vector mask.
1344 std::optional<unsigned> getVPMaskIdx(unsigned Opcode);
1345
1346 /// The operand position of the explicit vector length parameter.
1347 std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
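// Usage sketch (illustrative; the operand positions depend on the concrete VP
// opcode as registered in VPIntrinsics.def):
//   if (ISD::isVPOpcode(Opc)) {
//     if (std::optional<unsigned> MaskIdx = ISD::getVPMaskIdx(Opc))
//       SDValue Mask = N->getOperand(*MaskIdx);
//     if (std::optional<unsigned> EVLIdx = ISD::getVPExplicitVectorLengthIdx(Opc))
//       SDValue EVL = N->getOperand(*EVLIdx);
//   }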
1348
1349 //===--------------------------------------------------------------------===//
1350 /// MemIndexedMode enum - This enum defines the load / store indexed
1351 /// addressing modes.
1352 ///
1353 /// UNINDEXED "Normal" load / store. The effective address is already
1354 /// computed and is available in the base pointer. The offset
1355 /// operand is always undefined. In addition to producing a
1356 /// chain, an unindexed load produces one value (result of the
1357 /// load); an unindexed store does not produce a value.
1358 ///
1359 /// PRE_INC Similar to the unindexed mode where the effective address is
1360 /// PRE_DEC the value of the base pointer plus / minus the offset.
1361 /// The address computation is considered to be folded into the load /
1362 /// store operation (i.e. the load / store performs the address
1363 /// computation as well as the memory transaction).
1364 /// The base operand is always undefined. In addition to
1365 /// producing a chain, pre-indexed load produces two values
1366 /// (result of the load and the result of the address
1367 /// computation); a pre-indexed store produces one value (result
1368 /// of the address computation).
1369 ///
1370 /// POST_INC The effective address is the value of the base pointer. The
1371 /// POST_DEC value of the offset operand is then added to / subtracted
1372 /// from the base after the memory transaction. In addition to
1373 /// producing a chain, a post-indexed load produces two values
1374 /// (the result of the load and the result of the base +/- offset
1375 /// computation); a post-indexed store produces one value (the
1376 /// result of the base +/- offset computation).
1377 enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
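// Rough pseudo-code for the addressing behaviour only (a sketch; it ignores
// the chain and the exact operand order of the load / store nodes):
//   PRE_INC  : NewBase = Base + Offset; Val = *NewBase;   // load returns Val and NewBase
//   POST_INC : Val = *Base; NewBase = Base + Offset;      // load returns Val and NewBase
//   PRE_DEC / POST_DEC behave the same way with Base - Offset.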
1378
1379 static const int LAST_INDEXED_MODE = POST_DEC + 1;
1380
1381 //===--------------------------------------------------------------------===//
1382 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
1383 /// index parameter when calculating addresses.
1384 ///
1385 /// SIGNED_SCALED Addr = Base + ((signed)Index * Scale)
1386 /// UNSIGNED_SCALED Addr = Base + ((unsigned)Index * Scale)
1387 ///
1388 /// NOTE: The value of Scale is typically only known to the node owning the
1389 /// IndexType; a Scale value of 1 is equivalent to an unscaled index.
1390 enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
1391
1392 static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
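// Worked example (assuming 64-bit addressing and a 32-bit index whose bit
// pattern is 0xFFFFFFFF): SIGNED_SCALED treats the index as -1 and yields
// Addr = Base - Scale, whereas UNSIGNED_SCALED yields
// Addr = Base + 0xFFFFFFFF * Scale.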
1393
1394 inline bool isIndexTypeSigned(MemIndexType IndexType) {
1395 return IndexType == SIGNED_SCALED;
1396 }
1397
1398 //===--------------------------------------------------------------------===//
1399 /// LoadExtType enum - This enum defines the three variants of LOADEXT
1400 /// (load with extension).
1401 ///
1402 /// SEXTLOAD loads the integer operand and sign extends it to a larger
1403 /// integer result type.
1404 /// ZEXTLOAD loads the integer operand and zero extends it to a larger
1405 /// integer result type.
1406 /// EXTLOAD is used for two things: floating point extending loads and
1407 /// integer extending loads [the top bits are undefined].
1408 enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };
1409
1410 static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
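// E.g. loading an i8 with value 0x80 into an i32 result: SEXTLOAD produces
// 0xFFFFFF80, ZEXTLOAD produces 0x00000080, and EXTLOAD leaves bits 8-31 of
// the result undefined.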
1411
1412 NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
1413
1414 //===--------------------------------------------------------------------===//
1415 /// ISD::CondCode enum - These are ordered carefully to make the bitfields
1416 /// below work out, when considering SETFALSE (something that never exists
1417 /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
1418 /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
1419 /// to. If the "N" column is 1, the result of the comparison is undefined if
1420 /// the input is a NAN.
1421 ///
1422 /// All of these (except for the 'always folded ops') should be handled for
1423 /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
1424 /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
1425 ///
1426 /// Note that these are laid out in a specific order to allow bit-twiddling
1427 /// to transform conditions.
1428 enum CondCode {
1429 // Opcode       N U L G E        Intuitive operation
1430 SETFALSE,  //     0 0 0 0        Always false (always folded)
1431 SETOEQ,    //     0 0 0 1        True if ordered and equal
1432 SETOGT,    //     0 0 1 0        True if ordered and greater than
1433 SETOGE,    //     0 0 1 1        True if ordered and greater than or equal
1434 SETOLT,    //     0 1 0 0        True if ordered and less than
1435 SETOLE,    //     0 1 0 1        True if ordered and less than or equal
1436 SETONE,    //     0 1 1 0        True if ordered and operands are unequal
1437 SETO,      //     0 1 1 1        True if ordered (no nans)
1438 SETUO,     //     1 0 0 0        True if unordered: isnan(X) | isnan(Y)
1439 SETUEQ,    //     1 0 0 1        True if unordered or equal
1440 SETUGT,    //     1 0 1 0        True if unordered or greater than
1441 SETUGE,    //     1 0 1 1        True if unordered, greater than, or equal
1442 SETULT,    //     1 1 0 0        True if unordered or less than
1443 SETULE,    //     1 1 0 1        True if unordered, less than, or equal
1444 SETUNE,    //     1 1 1 0        True if unordered or not equal
1445 SETTRUE,   //     1 1 1 1        Always true (always folded)
1446 // Don't care operations: undefined if the input is a nan.
1447 SETFALSE2, //   1 X 0 0 0        Always false (always folded)
1448 SETEQ,     //   1 X 0 0 1        True if equal
1449 SETGT,     //   1 X 0 1 0        True if greater than
1450 SETGE,     //   1 X 0 1 1        True if greater than or equal
1451 SETLT,     //   1 X 1 0 0        True if less than
1452 SETLE,     //   1 X 1 0 1        True if less than or equal
1453 SETNE,     //   1 X 1 1 0        True if not equal
1454 SETTRUE2,  //   1 X 1 1 1        Always true (always folded)
1455
1456 SETCC_INVALID // Marker value.
1457 };
1458
1459 /// Return true if this is a setcc instruction that performs a signed
1460 /// comparison when used with integer operands.
1461 inline bool isSignedIntSetCC(CondCode Code) {
1462 return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
1463 }
1464
1465 /// Return true if this is a setcc instruction that performs an unsigned
1466 /// comparison when used with integer operands.
1467 inline bool isUnsignedIntSetCC(CondCode Code) {
1468 return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
1469 }
1470
1471 /// Return true if this is a setcc instruction that performs an equality
1472 /// comparison when used with integer operands.
1473 inline bool isIntEqualitySetCC(CondCode Code) {
1474 return Code == SETEQ || Code == SETNE;
1475 }
1476
1477 /// Return true if the specified condition returns true if the two operands to
1478 /// the condition are equal. Note that if one of the two operands is a NaN,
1479 /// this value is meaningless.
1480 inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }
1481
1482 /// This function returns 0 if the condition is always false when an operand
1483 /// is a NaN, 1 if the condition is always true when an operand is a NaN, and
1484 /// 2 if the result of the condition is undefined when an operand is a NaN.
1485 inline unsigned getUnorderedFlavor(CondCode Cond) {
1486 return ((int)Cond >> 3) & 3;
1487 }
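// Worked examples against the CondCode encoding above: isTrueWhenEqual(SETOEQ)
// and isTrueWhenEqual(SETUGE) are true (the E bit is set), while
// isTrueWhenEqual(SETOLT) is false. getUnorderedFlavor returns 0 for SETOLT
// (always false on a NaN), 1 for SETULT (always true on a NaN), and 2 for
// SETLT (undefined on a NaN).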
1488
1489 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1490 /// SetCC operation.
1491 CondCode getSetCCInverse(CondCode Operation, EVT Type);
1492
1493 inline bool isExtOpcode(unsigned Opcode) {
1494 return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND ||
1495 Opcode == ISD::SIGN_EXTEND;
1496 }
1497
1498 namespace GlobalISel {
1499 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1500 /// SetCC operation. The U bit of the condition code has different meanings
1501 /// between floating point and integer comparisons and LLT's don't provide
1502 /// this distinction. As such we need to be told whether the comparison is
1503 /// floating point or integer-like. Pointers should use integer-like
1504 /// comparisons.
1505 CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
1506 } // end namespace GlobalISel
1507
1508 /// Return the operation corresponding to (Y op X) when given the operation
1509 /// for (X op Y).
1510 CondCode getSetCCSwappedOperands(CondCode Operation);
1511
1512 /// Return the result of a logical OR between different comparisons of
1513 /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
1514 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1515 CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);
1516
1517 /// Return the result of a logical AND between different comparisons of
1518 /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
1519 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1520 CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
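// Illustrative results for floating-point comparisons (a sketch, not an
// exhaustive specification of these helpers):
//   getSetCCInverse(SETOLT, MVT::f32)              == SETUGE  // !(ordered lt)
//   getSetCCSwappedOperands(SETOLT)                == SETOGT  // (X < Y) <=> (Y > X)
//   getSetCCOrOperation(SETOLT, SETOEQ, MVT::f32)  == SETOLE
//   getSetCCAndOperation(SETOLE, SETONE, MVT::f32) == SETOLT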
1521
1522 } // namespace ISD
1523
1524 } // namespace llvm
1525
1526 #endif
1527