//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares codegen opcodes and related utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ISDOPCODES_H
#define LLVM_CODEGEN_ISDOPCODES_H

#include "llvm/CodeGen/ValueTypes.h"

namespace llvm {

/// ISD namespace - This namespace contains an enum which represents all of the
/// SelectionDAG node types and value types.
///
namespace ISD {

//===--------------------------------------------------------------------===//
/// ISD::NodeType enum - This enum defines the target-independent operators
/// for a SelectionDAG.
///
/// Targets may also define target-dependent operator codes for SDNodes. For
/// example, on x86, these are the enum values in the X86ISD namespace.
/// Targets should aim to use target-independent operators to model their
/// instruction sets as much as possible, and only use target-dependent
/// operators when they have special requirements.
///
/// Finally, during and after selection proper, SDNodes may use special
/// operator codes that correspond directly with MachineInstr opcodes. These
/// are used to represent selected instructions. See the isMachineOpcode()
/// and getMachineOpcode() member functions of SDNode.
///
enum NodeType {

  /// DELETED_NODE - This is an illegal value that is used to catch
  /// errors.  This opcode is not a legal opcode for any node.
  DELETED_NODE,

  /// EntryToken - This is the marker used to indicate the start of a region.
  EntryToken,

  /// TokenFactor - This node takes multiple tokens as input and produces a
  /// single token result. This is used to represent the fact that the operand
  /// operators are independent of each other.
  TokenFactor,

  /// AssertSext, AssertZext - These nodes record if a register contains a
  /// value that has already been zero or sign extended from a narrower type.
  /// These nodes take two operands.  The first is the node that has already
  /// been extended, and the second is a value type node indicating the width
  /// of the extension.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion does not hold for that value.
  AssertSext,
  AssertZext,

  /// AssertAlign - This node records if a register contains a value that
  /// has a known alignment and the trailing bits are known to be zero.
  /// NOTE: If the source value (or any vector element value) is poison, the
  /// assertion does not hold for that value.
  AssertAlign,

  /// Various leaf nodes.
  BasicBlock,
  VALUETYPE,
  CONDCODE,
  Register,
  RegisterMask,
  Constant,
  ConstantFP,
  GlobalAddress,
  GlobalTLSAddress,
  FrameIndex,
  JumpTable,
  ConstantPool,
  ExternalSymbol,
  BlockAddress,

  /// The address of the GOT
  GLOBAL_OFFSET_TABLE,

  /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
  /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
  /// of the frame or return address to return.  An index of zero corresponds
  /// to the current function's frame or return address, an index of one to
  /// the parent's frame or return address, and so on.
  FRAMEADDR,
  RETURNADDR,

  /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
  /// This node takes no operand, returns a target-specific pointer to the
  /// place in the stack frame where the return address of the current
  /// function is stored.
  ADDROFRETURNADDR,

  /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
  /// and returns the stack pointer value at the entry of the current
  /// function calling this intrinsic.
  SPONENTRY,

  /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
  /// Materializes the offset from the local object pointer of another
  /// function to a particular local object passed to llvm.localescape. The
  /// operand is the MCSymbol label used to represent this offset, since
  /// typically the offset is not known until after code generation of the
  /// parent.
  LOCAL_RECOVER,

  /// READ_REGISTER, WRITE_REGISTER - These nodes represent the
  /// llvm.read_register and llvm.write_register intrinsics on the DAG, which
  /// implement the named register global variables extension.
  READ_REGISTER,
  WRITE_REGISTER,

  /// FRAME_TO_ARGS_OFFSET - This node represents the offset from the frame
  /// pointer to the first (possible) on-stack argument. This is needed for
  /// correct stack adjustment during unwind.
  FRAME_TO_ARGS_OFFSET,

  /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
  /// Frame Address (CFA), generally the value of the stack pointer at the
  /// call site in the previous frame.
  EH_DWARF_CFA,

  /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
  /// the 'eh_return' gcc dwarf builtin, which is used to return from an
  /// exception. The general meaning is: adjust the stack by OFFSET and pass
  /// execution to HANDLER. Many platform-related details apply as well.
  EH_RETURN,

  /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
  /// This corresponds to the eh.sjlj.setjmp intrinsic.
  /// It takes an input chain and a pointer to the jump buffer as inputs
  /// and returns a result value and an output chain.
  EH_SJLJ_SETJMP,

  /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
  /// This corresponds to the eh.sjlj.longjmp intrinsic.
  /// It takes an input chain and a pointer to the jump buffer as inputs
  /// and returns an outchain.
  EH_SJLJ_LONGJMP,

  /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
  /// The target initializes the dispatch table here.
  EH_SJLJ_SETUP_DISPATCH,

  /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
  /// simplification, or lowering of the constant. They are used for constants
  /// which are known to fit in the immediate fields of their users, or for
  /// carrying magic numbers which are not values which need to be
  /// materialized in registers.
  TargetConstant,
  TargetConstantFP,

  /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
  /// anything else with this node, and this is valid in the target-specific
  /// dag, turning into a GlobalAddress operand.
  TargetGlobalAddress,
  TargetGlobalTLSAddress,
  TargetFrameIndex,
  TargetJumpTable,
  TargetConstantPool,
  TargetExternalSymbol,
  TargetBlockAddress,

  MCSymbol,

  /// TargetIndex - Like a constant pool entry, but with completely
  /// target-dependent semantics. Holds target flags, a 32-bit index, and a
  /// 64-bit index. Targets can use this however they like.
  TargetIndex,

  /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
  /// This node represents a target intrinsic function with no side effects.
  /// The first operand is the ID number of the intrinsic from the
  /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
  /// node returns the result of the intrinsic.
  INTRINSIC_WO_CHAIN,

  /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
  /// This node represents a target intrinsic function with side effects that
  /// returns a result.  The first operand is a chain pointer.  The second is
  /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
  /// operands to the intrinsic follow.  The node has two results, the result
  /// of the intrinsic and an output chain.
  INTRINSIC_W_CHAIN,

  /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
  /// This node represents a target intrinsic function with side effects that
  /// does not return a result.  The first operand is a chain pointer.  The
  /// second is the ID number of the intrinsic from the llvm::Intrinsic
  /// namespace.  The operands to the intrinsic follow.
  INTRINSIC_VOID,

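  // Illustrative sketch (an assumption, not taken from this header): a node
  // for a side-effect-free target intrinsic might be built roughly as
  // follows, assuming a SelectionDAG &DAG, an SDLoc DL, a result type VT and
  // an argument Arg (all names hypothetical). The intrinsic ID is passed as a
  // TargetConstant first operand; the integer type used for it is shown here
  // as MVT::i64 but is normally the target's pointer type.
  //   SDValue ID = DAG.getTargetConstant(IntrinsicID, DL, MVT::i64);
  //   SDValue Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Arg);
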
  /// CopyToReg - This node has three operands: a chain, a register number to
  /// set to this value, and a value.
  CopyToReg,

  /// CopyFromReg - This node indicates that the input value is a virtual or
  /// physical register that is defined outside of the scope of this
  /// SelectionDAG.  The register is available from the RegisterSDNode object.
  CopyFromReg,

  /// UNDEF - An undefined node.
  UNDEF,

  /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
  /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
  /// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
  FREEZE,

  /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
  /// a Constant, which is required to be operand #1) half of the integer or
  /// float value specified as operand #0.  This is only for use before
  /// legalization, for values that will be broken into multiple registers.
  EXTRACT_ELEMENT,

  /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
  /// Given two values of the same integer value type, this produces a value
  /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
  /// legalization. The lower part of the composite value should be in
  /// element 0 and the upper part should be in element 1.
  BUILD_PAIR,

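  // Illustrative sketch (assumption, not part of the opcode documentation):
  // splitting an i64 value N into i32 halves and rebuilding it, assuming a
  // SelectionDAG &DAG and an SDLoc DL (names hypothetical). Operand #1 of
  // EXTRACT_ELEMENT selects the half and is shown here as an i32 constant.
  //   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, N,
  //                            DAG.getConstant(0, DL, MVT::i32));
  //   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, N,
  //                            DAG.getConstant(1, DL, MVT::i32));
  //   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
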
  /// MERGE_VALUES - This node takes multiple discrete operands and returns
  /// them all as its individual results.  This node has exactly the same
  /// number of inputs and outputs. This node is useful for some pieces of the
  /// code generator that want to think about a single node with multiple
  /// results, not multiple nodes.
  MERGE_VALUES,

  /// Simple integer binary arithmetic operators.
  ADD,
  SUB,
  MUL,
  SDIV,
  UDIV,
  SREM,
  UREM,

  /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
  /// a signed/unsigned value of type i[2*N], and return the full value as
  /// two results, each of type iN.
  SMUL_LOHI,
  UMUL_LOHI,

  /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
  /// remainder result.
  SDIVREM,
  UDIVREM,

  /// CARRY_FALSE - This node is used when folding other nodes,
  /// like ADDC/SUBC, which indicate the carry result is always false.
  CARRY_FALSE,

  /// Carry-setting nodes for multiple precision addition and subtraction.
  /// These nodes take two operands of the same value type, and produce two
  /// results.  The first result is the normal add or sub result, the second
  /// result is the carry flag result.
  /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
  /// They are kept around for now to provide a smooth transition path
  /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
  ADDC,
  SUBC,

  /// Carry-using nodes for multiple precision addition and subtraction. These
  /// nodes take three operands: The first two are the normal lhs and rhs to
  /// the add or sub, and the third is the input carry flag.  These nodes
  /// produce two results; the normal result of the add or sub, and the output
  /// carry flag.  These nodes both read and write a carry flag to allow them
  /// to be chained together for add and sub of arbitrarily large values.
  ADDE,
  SUBE,

  /// Carry-using nodes for multiple precision addition and subtraction.
  /// These nodes take three operands: The first two are the normal lhs and
  /// rhs to the add or sub, and the third is a boolean indicating if there
  /// is an incoming carry. These nodes produce two results: the normal
  /// result of the add or sub, and the output carry so they can be chained
  /// together. The use of this opcode is preferable to adde/sube if the
  /// target supports it, as the carry is a regular value rather than a
  /// glue, which allows further optimisation.
  ADDCARRY,
  SUBCARRY,

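  // Illustrative sketch (assumption): a 64-bit unsigned add expanded into
  // 32-bit pieces using the carry nodes above, assuming a SelectionDAG &DAG,
  // an SDLoc DL, and i32 halves LHSLo/LHSHi and RHSLo/RHSHi (names
  // hypothetical). The carry produced by the low half feeds the high half.
  //   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
  //   SDValue Lo = DAG.getNode(ISD::UADDO, DL, VTs, LHSLo, RHSLo);
  //   SDValue Hi = DAG.getNode(ISD::ADDCARRY, DL, VTs, LHSHi, RHSHi,
  //                            Lo.getValue(1));
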
  /// Carry-using overflow-aware nodes for multiple precision addition and
  /// subtraction. These nodes take three operands: The first two are normal lhs
  /// and rhs to the add or sub, and the third is a boolean indicating if there
  /// is an incoming carry. They produce two results: the normal result of the
  /// add or sub, and a boolean that indicates if an overflow occurred (*not* a
  /// flag, because it may be a store to memory, etc.). If the type of the
  /// boolean is not i1 then the high bits conform to getBooleanContents.
  SADDO_CARRY,
  SSUBO_CARRY,

  /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
  /// These nodes take two operands: the normal LHS and RHS to the add. They
  /// produce two results: the normal result of the add, and a boolean that
  /// indicates if an overflow occurred (*not* a flag, because it may be a
  /// store to memory, etc.).  If the type of the boolean is not i1 then the
  /// high bits conform to getBooleanContents.
  /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
  SADDO,
  UADDO,

  /// Same for subtraction.
  SSUBO,
  USUBO,

  /// Same for multiplication.
  SMULO,
  UMULO,

  /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
  /// integers with the same bit width (W). If the true value of LHS + RHS
  /// exceeds the largest value that can be represented by W bits, the
  /// resulting value is this maximum value. Otherwise, if this value is less
  /// than the smallest value that can be represented by W bits, the
  /// resulting value is this minimum value.
  SADDSAT,
  UADDSAT,

  /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
  /// integers with the same bit width (W). If the true value of LHS - RHS
  /// exceeds the largest value that can be represented by W bits, the
  /// resulting value is this maximum value. Otherwise, if this value is less
  /// than the smallest value that can be represented by W bits, the
  /// resulting value is this minimum value.
  SSUBSAT,
  USUBSAT,

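  // Worked example of the saturating semantics above for i8 (W = 8):
  // UADDSAT(250, 10) yields 255 because the true sum 260 exceeds the largest
  // unsigned 8-bit value, SADDSAT(100, 100) yields 127, and SSUBSAT(-100, 100)
  // yields -128.
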
  /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
  /// operand is the value to be shifted, and the second argument is the amount
  /// to shift by. Both must be integers of the same bit width (W). If the true
  /// value of LHS << RHS exceeds the largest value that can be represented by
  /// W bits, the resulting value is this maximum value. Otherwise, if this
  /// value is less than the smallest value that can be represented by W bits,
  /// the resulting value is this minimum value.
  SSHLSAT,
  USHLSAT,

  /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
  /// on 2 integers with the same width and scale. SCALE represents the scale
  /// of both operands as fixed point numbers. This SCALE parameter must be a
  /// constant integer. A scale of zero is effectively performing
  /// multiplication on 2 integers.
  SMULFIX,
  UMULFIX,

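  // Worked example of the fixed point semantics above: with W = 8 and
  // SCALE = 4, the bit pattern 0x28 (40) represents 2.5 and 0x18 (24)
  // represents 1.5. SMULFIX(0x28, 0x18, 4) computes (40 * 24) >> 4 = 60 =
  // 0x3C, i.e. 3.75.
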
  /// Same as the corresponding unsaturated fixed point instructions, but the
  /// result is clamped between the min and max values representable by the
  /// bits of the first 2 operands.
  SMULFIXSAT,
  UMULFIXSAT,

  /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
  /// 2 integers with the same width and scale. SCALE represents the scale
  /// of both operands as fixed point numbers. This SCALE parameter must be a
  /// constant integer.
  SDIVFIX,
  UDIVFIX,

  /// Same as the corresponding unsaturated fixed point instructions, but the
  /// result is clamped between the min and max values representable by the
  /// bits of the first 2 operands.
  SDIVFIXSAT,
  UDIVFIXSAT,

  /// Simple binary floating point operators.
  FADD,
  FSUB,
  FMUL,
  FDIV,
  FREM,

  /// Constrained versions of the binary floating point operators.
  /// These will be lowered to the simple operators before final selection.
  /// They are used to limit optimizations while the DAG is being
  /// optimized.
  STRICT_FADD,
  STRICT_FSUB,
  STRICT_FMUL,
  STRICT_FDIV,
  STRICT_FREM,
  STRICT_FMA,

  /// Constrained versions of libm-equivalent floating point intrinsics.
  /// These will be lowered to the equivalent non-constrained pseudo-op
  /// (or expanded to the equivalent library call) before final selection.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_FSQRT,
  STRICT_FPOW,
  STRICT_FPOWI,
  STRICT_FSIN,
  STRICT_FCOS,
  STRICT_FEXP,
  STRICT_FEXP2,
  STRICT_FLOG,
  STRICT_FLOG10,
  STRICT_FLOG2,
  STRICT_FRINT,
  STRICT_FNEARBYINT,
  STRICT_FMAXNUM,
  STRICT_FMINNUM,
  STRICT_FCEIL,
  STRICT_FFLOOR,
  STRICT_FROUND,
  STRICT_FROUNDEVEN,
  STRICT_FTRUNC,
  STRICT_LROUND,
  STRICT_LLROUND,
  STRICT_LRINT,
  STRICT_LLRINT,
  STRICT_FMAXIMUM,
  STRICT_FMINIMUM,

  /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
  /// unsigned integer. These have the same semantics as fptosi and fptoui
  /// in IR.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_FP_TO_SINT,
  STRICT_FP_TO_UINT,

  /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
  /// a floating point value. These have the same semantics as sitofp and
  /// uitofp in IR.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_SINT_TO_FP,
  STRICT_UINT_TO_FP,

  /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
  /// point type down to the precision of the destination VT.  TRUNC is a
  /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
  /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
  /// change the value of Y.
  ///
  /// The TRUNC = 1 case is used in cases where we know that the value will
  /// not be modified by the node, because Y is not using any of the extra
  /// precision of the source type.  This allows certain transformations like
  /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
  /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
  /// removed.
  /// It is used to limit optimizations while the DAG is being optimized.
  STRICT_FP_ROUND,

  /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
  /// type.
  /// It is used to limit optimizations while the DAG is being optimized.
  STRICT_FP_EXTEND,

  /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
  /// for floating-point operands only.  STRICT_FSETCC performs a quiet
  /// comparison operation, while STRICT_FSETCCS performs a signaling
  /// comparison operation.
  STRICT_FSETCC,
  STRICT_FSETCCS,

  /// FMA - Perform a * b + c with no intermediate rounding step.
  FMA,

  /// FMAD - Perform a * b + c, while getting the same result as the
  /// separately rounded operations.
  FMAD,

  /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
  /// DAG node does not require that X and Y have the same type, just that
  /// they are both floating point.  X and the result must have the same type.
  /// FCOPYSIGN(f32, f64) is allowed.
  FCOPYSIGN,

  /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
  /// value as an integer 0/1 value.
  FGETSIGN,

  /// Returns platform specific canonical encoding of a floating point number.
  FCANONICALIZE,

  /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
  /// with the specified, possibly variable, elements. The types of the
  /// operands must match the vector element type, except that integer types
  /// are allowed to be larger than the element type, in which case the
  /// operands are implicitly truncated. The types of the operands must all
  /// be the same.
  BUILD_VECTOR,

  /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
  /// at IDX replaced with VAL. If the type of VAL is larger than the vector
  /// element type then VAL is truncated before replacement.
  ///
  /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
  /// vector width. IDX is not first scaled by the runtime scaling factor of
  /// VECTOR.
  INSERT_VECTOR_ELT,

  /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
  /// identified by the (potentially variable) element number IDX. If the return
  /// type is an integer type larger than the element type of the vector, the
  /// result is extended to the width of the return type. In that case, the high
  /// bits are undefined.
  ///
  /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
  /// vector width. IDX is not first scaled by the runtime scaling factor of
  /// VECTOR.
  EXTRACT_VECTOR_ELT,

  /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
  /// vector type with the same length and element type, this produces a
  /// concatenated vector result value, with length equal to the sum of the
  /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
  /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
  /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
  CONCAT_VECTORS,

  /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
  /// inserted into VECTOR1. IDX represents the starting element number at
  /// which VECTOR2 will be inserted. Let the type of VECTOR2 be T; IDX must
  /// be a constant multiple of T's known minimum vector length, and if T is a
  /// scalable vector, IDX is first scaled by the runtime scaling factor of T.
  /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
  /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
  /// indices. If this condition cannot be determined statically but is false at
  /// runtime, then the result vector is undefined. The IDX parameter must be a
  /// vector index constant type, which for most targets will be an integer
  /// pointer type.
  ///
  /// This operation supports inserting a fixed-width vector into a scalable
  /// vector, but not the other way around.
  INSERT_SUBVECTOR,

  /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
  /// Let the result type be T, then IDX represents the starting element number
  /// from which a subvector of type T is extracted. IDX must be a constant
  /// multiple of T's known minimum vector length. If T is a scalable vector,
  /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
  /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
  /// condition cannot be determined statically but is false at runtime, then
  /// the result vector is undefined. The IDX parameter must be a vector index
  /// constant type, which for most targets will be an integer pointer type.
  ///
  /// This operation supports extracting a fixed-width vector from a scalable
  /// vector, but not the other way around.
  EXTRACT_SUBVECTOR,

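  // Illustrative sketch (assumption): extracting the low half of a v4i32
  // vector Vec, assuming a SelectionDAG &DAG and an SDLoc DL (names
  // hypothetical). The index must be a constant multiple of the result's
  // minimum vector length, here 0.
  //   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Vec,
  //                            DAG.getVectorIdxConstant(0, DL));
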
  /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
  /// whose elements are shuffled using the following algorithm:
  ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
  VECTOR_REVERSE,

  /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
  /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
  /// values that indicate which value (or undef) each result element will
  /// get.  These constant ints are accessible through the
  /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
  /// 'vperm' instruction, except that the indices must be constants and are
  /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
  VECTOR_SHUFFLE,

  /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
  /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
  /// Let the result type be T, if IMM is positive it represents the starting
  /// element number (an index) from which a subvector of type T is extracted
  /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
  /// specifying the number of trailing elements to extract from VEC1, where the
  /// elements of T are selected using the following algorithm:
  ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
  /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
  /// is a constant integer.
  VECTOR_SPLICE,

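  // Worked example of the splice semantics above: with VEC1 = <a, b, c, d>
  // and VEC2 = <e, f, g, h>, IMM = 1 produces <b, c, d, e> (extraction starts
  // at element 1 of the concatenation), while IMM = -2 produces <c, d, e, f>
  // (the trailing two elements of VEC1 followed by leading elements of VEC2).
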
  /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
  /// scalar value into element 0 of the resultant vector type.  The top
  /// elements 1 to N-1 of the N-element vector are undefined.  The type
  /// of the operand must match the vector element type, except when they
  /// are integer types.  In this case the operand is allowed to be wider
  /// than the vector element type, and is implicitly truncated to it.
  SCALAR_TO_VECTOR,

  /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
  /// duplicated in all lanes. The type of the operand must match the vector
  /// element type, except when they are integer types.  In this case the
  /// operand is allowed to be wider than the vector element type, and is
  /// implicitly truncated to it.
  SPLAT_VECTOR,

  /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
  /// scalar values joined together and then duplicated in all lanes. This
  /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
  /// allows representing a 64-bit splat on a target with 32-bit integers. The
  /// total width of the scalars must cover the element width. SCALAR1 contains
  /// the least significant bits of the value regardless of endianness and all
  /// scalars should have the same type.
  SPLAT_VECTOR_PARTS,

  /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
  /// of a linear sequence of unsigned values starting from 0 with a step of
  /// IMM, where IMM must be a TargetConstant with type equal to the vector
  /// element type. The arithmetic is performed modulo the bitwidth of the
  /// element.
  ///
  /// The operation does not support returning fixed-width vectors or
  /// non-constant operands.
  STEP_VECTOR,

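  // Worked example of the semantics above: STEP_VECTOR(3) with an nxv4i8
  // result type produces the lanes <0, 3, 6, 9, 12, ...>, with each lane
  // computed modulo 256 because the arithmetic wraps at the element bit width.
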
  /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
  /// producing an unsigned/signed value of type i[2*N], then return the top
  /// part.
  MULHU,
  MULHS,

  /// ABDS/ABDU - Absolute difference - Return the absolute difference between
  /// two numbers interpreted as signed/unsigned,
  /// i.e. trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
  ///  or  trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
  ABDS,
  ABDU,

  /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
  /// integers.
  SMIN,
  SMAX,
  UMIN,
  UMAX,

  /// Bitwise operators - logical and, logical or, logical xor.
  AND,
  OR,
  XOR,

  /// ABS - Determine the unsigned absolute value of a signed integer value of
  /// the same bitwidth.
  /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
  /// is performed.
  ABS,

  /// Shift and rotation operations.  After legalization, the type of the
  /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
  /// the shift amount can be any type, but care must be taken to ensure it is
  /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
  /// legalization, types like i1024 can occur and i8 doesn't have enough bits
  /// to represent the shift amount.
  /// When the 1st operand is a vector, the shift amount must be of the same
  /// type. (TLI.getShiftAmountTy() will return the same type when the input
  /// type is a vector.)
  /// For rotates and funnel shifts, the shift amount is treated as an unsigned
  /// amount modulo the element size of the first operand.
  ///
  /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
  /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
  /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
  SHL,
  SRA,
  SRL,
  ROTL,
  ROTR,
  FSHL,
  FSHR,

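  // Worked example of the funnel shift formulas above for i8 (BW = 8):
  // FSHL(0x12, 0x34, 3) = (0x12 << 3) | (0x34 >> 5) = 0x90 | 0x01 = 0x91 and
  // FSHR(0x12, 0x34, 3) = (0x12 << 5) | (0x34 >> 3) = 0x40 | 0x06 = 0x46,
  // i.e. the high byte of (0x1234 << 3) and the low byte of (0x1234 >> 3),
  // respectively.
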
  /// Byte Swap and Counting operators.
  BSWAP,
  CTTZ,
  CTLZ,
  CTPOP,
  BITREVERSE,
  PARITY,

  /// Bit counting operators with an undefined result for zero inputs.
  CTTZ_ZERO_UNDEF,
  CTLZ_ZERO_UNDEF,

  /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
  /// i1 then the high bits must conform to getBooleanContents.
  SELECT,

  /// Select with a vector condition (op #0) and two vector operands (ops #1
  /// and #2), returning a vector result.  All vectors have the same length.
  /// Much like the scalar select and setcc, each bit in the condition selects
  /// whether the corresponding result element is taken from op #1 or op #2.
  /// At first, the VSELECT condition is of vXi1 type. Later, targets may
  /// change the condition type in order to match the VSELECT node using a
  /// pattern. The condition follows the BooleanContent format of the target.
  VSELECT,

  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #2 and #3) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #4, a CondCodeSDNode.
  SELECT_CC,

  /// SetCC operator - This evaluates to a true value iff the condition is
  /// true.  If the result value type is not i1 then the high bits conform
  /// to getBooleanContents.  The operands to this are the left and right
  /// operands to compare (ops #0, and #1) and the condition code to compare
  /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
  /// then the result type must also be a vector type.
  SETCC,

  /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
  /// op #2 is a boolean indicating if there is an incoming carry. This
  /// operator checks the result of "LHS - RHS - Carry", and can be used to
  /// compare two wide integers:
  /// (setcccarry lhshi rhshi (subcarry lhslo rhslo) cc).
  /// Only valid for integers.
  SETCCCARRY,

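  // Illustrative sketch (assumption): one way to build an unsigned less-than
  // comparison of two 64-bit values split into i32 halves, assuming a
  // SelectionDAG &DAG, an SDLoc DL, and halves LHSLo/LHSHi/RHSLo/RHSHi (names
  // hypothetical). The borrow from the low halves feeds the carry operand.
  //   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
  //   SDValue Lo = DAG.getNode(ISD::USUBO, DL, VTs, LHSLo, RHSLo);
  //   SDValue Lt = DAG.getNode(ISD::SETCCCARRY, DL, MVT::i1, LHSHi, RHSHi,
  //                            Lo.getValue(1), DAG.getCondCode(ISD::SETULT));
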
  /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
  /// integer shift operations.  The operation ordering is:
  ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
  SHL_PARTS,
  SRA_PARTS,
  SRL_PARTS,

  /// Conversion operators.  These are all single input single output
  /// operations.  For all of these, the result type must be strictly
  /// wider or narrower (depending on the operation) than the source
  /// type.

  /// SIGN_EXTEND - Used for integer types, replicating the sign bit
  /// into new bits.
  SIGN_EXTEND,

  /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
  ZERO_EXTEND,

  /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
  ANY_EXTEND,

  /// TRUNCATE - Completely drop the high bits.
  TRUNCATE,

  /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
  /// depends on the first letter) to floating point.
  SINT_TO_FP,
  UINT_TO_FP,

  /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
  /// sign extend a small value in a large integer register (e.g. sign
  /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
  /// with the 7th bit).  The size of the smaller type is indicated by
  /// operand #1, a ValueType node.
  SIGN_EXTEND_INREG,

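  // Illustrative sketch (assumption): sign-extending the low 8 bits of an i32
  // value X in place, assuming a SelectionDAG &DAG and an SDLoc DL (names
  // hypothetical). For X = 0x000000FF the result is 0xFFFFFFFF.
  //   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, X,
  //                             DAG.getValueType(MVT::i8));
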
  /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register any-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is any-extended into the corresponding,
  /// wider result elements with the high bits becoming undef.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  ANY_EXTEND_VECTOR_INREG,

  /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register sign-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is sign-extended into the corresponding,
  /// wider result elements.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  SIGN_EXTEND_VECTOR_INREG,

  /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register zero-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is zero-extended into the corresponding,
  /// wider result elements.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  ZERO_EXTEND_VECTOR_INREG,

  /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
  /// integer. These have the same semantics as fptosi and fptoui in IR. If
  /// the FP value cannot fit in the integer type, the results are undefined.
  FP_TO_SINT,
  FP_TO_UINT,

  /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
  /// signed or unsigned scalar integer type given in operand 1 with the
  /// following semantics:
  ///
  ///  * If the value is NaN, zero is returned.
  ///  * If the value is larger/smaller than the largest/smallest integer,
  ///    the largest/smallest integer is returned (saturation).
  ///  * Otherwise the result of rounding the value towards zero is returned.
  ///
  /// The scalar width of the type given in operand 1 must be equal to, or
  /// smaller than, the scalar result type width. It may end up being smaller
  /// than the result width as a result of integer type legalization.
  FP_TO_SINT_SAT,
  FP_TO_UINT_SAT,

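  // Worked example of the saturating semantics above with an i8 type in
  // operand 1: FP_TO_SINT_SAT(300.7) yields 127, FP_TO_SINT_SAT(-1.5) yields
  // -1 (rounded toward zero), and a NaN input yields 0. The unsigned variant
  // clamps negative inputs to 0, so FP_TO_UINT_SAT(-3.0) yields 0.
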
  /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
  /// down to the precision of the destination VT.  TRUNC is a flag, which is
  /// always an integer that is zero or one.  If TRUNC is 0, this is a
  /// normal rounding, if it is 1, this FP_ROUND is known to not change the
  /// value of Y.
  ///
  /// The TRUNC = 1 case is used in cases where we know that the value will
  /// not be modified by the node, because Y is not using any of the extra
  /// precision of the source type.  This allows certain transformations like
  /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
  /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
  FP_ROUND,

  /// Returns current rounding mode:
  /// -1 Undefined
  ///  0 Round to 0
  ///  1 Round to nearest, ties to even
  ///  2 Round to +inf
  ///  3 Round to -inf
  ///  4 Round to nearest, ties to zero
  /// Result is rounding mode and chain. Input is a chain.
  /// TODO: Rename this node to GET_ROUNDING.
  FLT_ROUNDS_,

  /// Set rounding mode.
  /// The first operand is a chain pointer. The second specifies the required
  /// rounding mode, encoded in the same way as used in '``FLT_ROUNDS_``'.
  SET_ROUNDING,

  /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
  FP_EXTEND,

  /// BITCAST - This operator converts between integer, vector and FP
  /// values, as if the value was stored to memory with one type and loaded
  /// from the same address with the other type (or equivalently for vector
  /// format conversions, etc).  The source and result are required to have
  /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
  /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
  /// getNode().
  ///
  /// This operator is subtly different from the bitcast instruction from
  /// LLVM-IR since this node may change the bits in the register. For
  /// example, this occurs on big-endian NEON and big-endian MSA where the
  /// layout of the bits in the register depends on the vector type and this
  /// operator acts as a shuffle operation for some vector type combinations.
  BITCAST,

  /// ADDRSPACECAST - This operator converts between pointers of different
  /// address spaces.
  ADDRSPACECAST,

  /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
  /// and truncation for half-precision (16 bit) floating numbers. These nodes
  /// form a semi-softened interface for dealing with f16 (as an i16), which
  /// is often a storage-only type but has native conversions.
  FP16_TO_FP,
  FP_TO_FP16,
  STRICT_FP16_TO_FP,
  STRICT_FP_TO_FP16,

  /// Perform various unary floating-point operations inspired by libm. For
  /// FPOWI, the result is undefined if the integer operand doesn't fit into
  /// sizeof(int).
  FNEG,
  FABS,
  FSQRT,
  FCBRT,
  FSIN,
  FCOS,
  FPOWI,
  FPOW,
  FLOG,
  FLOG2,
  FLOG10,
  FEXP,
  FEXP2,
  FCEIL,
  FTRUNC,
  FRINT,
  FNEARBYINT,
  FROUND,
  FROUNDEVEN,
  FFLOOR,
  LROUND,
  LLROUND,
  LRINT,
  LLRINT,

  /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
  /// values.
  ///
  /// In the case where a single input is a NaN (either signaling or quiet),
  /// the non-NaN input is returned.
  ///
  /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
  FMINNUM,
  FMAXNUM,

  /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
  /// two values, following the IEEE-754 2008 definition. This differs from
  /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
  /// signaling NaN, returns a quiet NaN.
  FMINNUM_IEEE,
  FMAXNUM_IEEE,

  /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
  /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
  /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
  FMINIMUM,
  FMAXIMUM,

  /// FSINCOS - Compute both fsin and fcos as a single operation.
  FSINCOS,

  /// LOAD and STORE have token chains as their first operand, then the same
  /// operands as an LLVM load/store instruction, then an offset node that
  /// is added / subtracted from the base pointer to form the address (for
  /// indexed memory ops).
  LOAD,
  STORE,

  /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
  /// to a specified boundary.  This node always has two return values: a new
  /// stack pointer value and a chain. The first operand is the token chain,
  /// the second is the number of bytes to allocate, and the third is the
  /// alignment boundary.  The size is guaranteed to be a multiple of the
  /// stack alignment, and the alignment is guaranteed to be bigger than the
  /// stack alignment (if required) or 0 to get standard stack alignment.
  DYNAMIC_STACKALLOC,

  /// Control flow instructions.  These all have token chains.

  /// BR - Unconditional branch.  The first operand is the chain
  /// operand, the second is the MBB to branch to.
  BR,

  /// BRIND - Indirect branch.  The first operand is the chain, the second
  /// is the value to branch to, which must be of the same type as the
  /// target's pointer type.
  BRIND,

  /// BR_JT - Jumptable branch. The first operand is the chain, the second
  /// is the jumptable index, the last one is the jumptable entry index.
  BR_JT,

  /// BRCOND - Conditional branch.  The first operand is the chain, the
  /// second is the condition, the third is the block to branch to if the
  /// condition is true.  If the type of the condition is not i1, then the
  /// high bits must conform to getBooleanContents. If the condition is undef,
  /// it nondeterministically jumps to the block.
  /// TODO: Its semantics w.r.t undef requires further discussion; we need to
  /// make sure that it is consistent with optimizations in MIR & the
  /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
  BRCOND,

  /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
  /// that the condition is represented as condition code, and two nodes to
  /// compare, rather than as a combined SetCC node.  The operands in order
  /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
  /// condition is undef, it nondeterministically jumps to the block.
  BR_CC,

  /// INLINEASM - Represents an inline asm block.  This node always has two
  /// return values: a chain and a flag result.  The inputs are as follows:
  ///   Operand #0  : Input chain.
  ///   Operand #1  : an ExternalSymbolSDNode with a pointer to the asm string.
  ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
  ///   Operand #3  : HasSideEffect, IsAlignStack bits.
  ///   After this, it is followed by a list of operands with this format:
  ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
  ///                     number of operands that follow, etc.  See InlineAsm.h.
  ///     ... however many operands ...
  ///   Operand #last: Optional, an incoming flag.
  ///
  /// The variable width operands are required to represent target addressing
  /// modes as a single "operand", even though they may have multiple
  /// SDOperands.
  INLINEASM,

  /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
  INLINEASM_BR,

  /// EH_LABEL - Represents a label in mid basic block used to track
  /// locations needed for debug and exception handling tables.  These nodes
  /// take a chain as input and return a chain.
  EH_LABEL,

  /// ANNOTATION_LABEL - Represents a mid basic block label used by
  /// annotations. This should remain within the basic block and be ordered
  /// with respect to other call instructions, but loads and stores may float
  /// past it.
  ANNOTATION_LABEL,

  /// CATCHRET - Represents a return from a catch block funclet. Used for
  /// MSVC compatible exception handling. Takes a chain operand and a
  /// destination basic block operand.
  CATCHRET,

  /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
  /// MSVC compatible exception handling. Takes only a chain operand.
  CLEANUPRET,

  /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
  /// value, the same type as the pointer type for the system, and an output
  /// chain.
  STACKSAVE,

  /// STACKRESTORE has two operands, an input chain and a pointer to restore
  /// to. It returns an output chain.
  STACKRESTORE,

  /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
  /// of a call sequence, and carry arbitrary information that target might
  /// want to know.  The first operand is a chain, the rest are specified by
  /// the target and not touched by the DAG optimizers.
  /// Targets that may use stack to pass call arguments define additional
  /// operands:
  /// - size of the call frame part that must be set up within the
  ///   CALLSEQ_START..CALLSEQ_END pair,
  /// - part of the call frame prepared prior to CALLSEQ_START.
  /// Both these parameters must be constants, their sum is the total call
  /// frame size.
  /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
  CALLSEQ_START, // Beginning of a call sequence
  CALLSEQ_END,   // End of a call sequence

  /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
  /// and the alignment. It returns a pair of values: the vaarg value and a
  /// new chain.
  VAARG,

  /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
  /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
  /// source.
  VACOPY,

  /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
  /// pointer, and a SRCVALUE.
  VAEND,
  VASTART,

  // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
  // with the preallocated call Value.
  PREALLOCATED_SETUP,
  // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
  // with the preallocated call Value, and a constant int.
  PREALLOCATED_ARG,

  /// SRCVALUE - This is a node type that holds a Value* that is used to
  /// make reference to a value in the LLVM IR.
  SRCVALUE,

  /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
  /// reference metadata in the IR.
  MDNODE_SDNODE,

  /// PCMARKER - This corresponds to the pcmarker intrinsic.
  PCMARKER,

  /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
  /// It produces a chain and one i64 value. The only operand is a chain.
  /// If i64 is not legal, the result will be expanded into smaller values.
  /// Still, it returns an i64, so targets should set legality for i64.
  /// The result is the content of the architecture-specific cycle
  /// counter-like register (or other high accuracy low latency clock source).
  READCYCLECOUNTER,

  /// HANDLENODE node - Used as a handle for various purposes.
  HANDLENODE,

  /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
  /// takes as input a token chain, the pointer to the trampoline, the pointer
  /// to the nested function, the pointer to pass for the 'nest' parameter, a
  /// SRCVALUE for the trampoline and another for the nested function
  /// (allowing targets to access the original Function*).
  /// It produces a token chain as output.
  INIT_TRAMPOLINE,

  /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
  /// It takes a pointer to the trampoline and produces a (possibly) new
  /// pointer to the same trampoline with platform-specific adjustments
  /// applied.  The pointer it returns points to an executable block of code.
  ADJUST_TRAMPOLINE,

  /// TRAP - Trapping instruction
  TRAP,

  /// DEBUGTRAP - Trap intended to get the attention of a debugger.
  DEBUGTRAP,

  /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
  /// failure.
  UBSANTRAP,

  /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
  /// is the chain.  The other operands are the address to prefetch,
  /// read / write specifier, locality specifier and instruction / data cache
  /// specifier.
  PREFETCH,

  /// ARITH_FENCE - This corresponds to an arithmetic fence intrinsic. Both its
  /// operand and output are the same floating-point type.
  ARITH_FENCE,

  /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
  /// This corresponds to the fence instruction. It takes an input chain, and
  /// two integer constants: an AtomicOrdering and a SynchronizationScope.
  ATOMIC_FENCE,

  /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
  /// This corresponds to "load atomic" instruction.
  ATOMIC_LOAD,

  /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
  /// This corresponds to "store atomic" instruction.
  ATOMIC_STORE,

  /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
  ///                                          swapLo, swapHi)
  /// This corresponds to the cmpxchg instruction.
  ATOMIC_CMP_SWAP,

  /// Val, Success, OUTCHAIN
  ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
  /// N.b. this is still a strong cmpxchg operation, so
  /// Success == "Val == cmp".
  ATOMIC_CMP_SWAP_WITH_SUCCESS,

  /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
  /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
  /// These correspond to the atomicrmw instruction.
  ATOMIC_SWAP,
  ATOMIC_LOAD_ADD,
  ATOMIC_LOAD_SUB,
  ATOMIC_LOAD_AND,
  ATOMIC_LOAD_CLR,
  ATOMIC_LOAD_OR,
  ATOMIC_LOAD_XOR,
  ATOMIC_LOAD_NAND,
  ATOMIC_LOAD_MIN,
  ATOMIC_LOAD_MAX,
  ATOMIC_LOAD_UMIN,
  ATOMIC_LOAD_UMAX,
  ATOMIC_LOAD_FADD,
  ATOMIC_LOAD_FSUB,

  // Masked load and store - consecutive vector load and store operations
  // with additional mask operand that prevents memory accesses to the
  // masked-off lanes.
  //
  // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
  // OutChain = MSTORE(Value, BasePtr, Mask)
  MLOAD,
  MSTORE,

  // Masked gather and scatter - load and store operations for a vector of
  // random addresses with additional mask operand that prevents memory
  // accesses to the masked-off lanes.
  //
  // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
  // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
  //
  // The Index operand can have more vector elements than the other operands
  // due to type legalization. The extra elements are ignored.
  MGATHER,
  MSCATTER,

  /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
  /// is the chain and the second operand is the alloca pointer.
  LIFETIME_START,
  LIFETIME_END,

  /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
  /// beginning and end of GC transition sequence, and carry arbitrary
  /// information that target might need for lowering.  The first operand is
  /// a chain, the rest are specified by the target and not touched by the DAG
  /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
  /// nested.
  GC_TRANSITION_START,
  GC_TRANSITION_END,

  /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
  /// the most recent dynamic alloca. For most targets that would be 0, but
  /// for some others (e.g. PowerPC, PowerPC64) that would be compile-time
  /// known nonzero constant. The only operand here is the chain.
  GET_DYNAMIC_AREA_OFFSET,

  /// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
  /// the quality of sample counts.
  PSEUDO_PROBE,

  /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
  /// number of elements within a scalable vector. IMM is a constant integer
  /// multiplier that is applied to the runtime value.
  VSCALE,

  /// Generic reduction nodes. These nodes represent horizontal vector
  /// reduction operations, producing a scalar result.
  /// The SEQ variants perform reductions in sequential order. The first
  /// operand is an initial scalar accumulator value, and the second operand
  /// is the vector to reduce.
  /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
  ///  ... is equivalent to
  /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
  VECREDUCE_SEQ_FADD,
  VECREDUCE_SEQ_FMUL,

  /// These reductions have relaxed evaluation order semantics, and have a
  /// single vector operand. The order of evaluation is unspecified. For
  /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
  /// reduction, i.e.:
  /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
  ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
  ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
  ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
  /// For non-pow-2 vectors, this can be computed by extracting each element
  /// and performing the operation as if it were scalarized.
  VECREDUCE_FADD,
  VECREDUCE_FMUL,
  /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
  VECREDUCE_FMAX,
  VECREDUCE_FMIN,
  /// Integer reductions may have a result type larger than the vector element
  /// type. However, the reduction is performed using the vector element type
  /// and the value in the top bits is unspecified.
  VECREDUCE_ADD,
  VECREDUCE_MUL,
  VECREDUCE_AND,
  VECREDUCE_OR,
  VECREDUCE_XOR,
  VECREDUCE_SMAX,
  VECREDUCE_SMIN,
  VECREDUCE_UMAX,
  VECREDUCE_UMIN,

1243 // Vector Predication
1244 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
1245 #include "llvm/IR/VPIntrinsics.def"
1246 
1247   /// BUILTIN_OP_END - This must be the last enum value in this list.
1248   /// The target-specific pre-isel opcode values start here.
1249   BUILTIN_OP_END
1250 };
1251 
1252 /// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
1253 /// which cannot raise FP exceptions should be less than this value.
1254 /// Those that do must not be less than this value.
1255 static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;
1256 
1257 /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
1258 /// which do not reference a specific memory location should be less than
1259 /// this value. Those that do must not be less than this value, and can
1260 /// be used with SelectionDAG::getMemIntrinsicNode.
1261 static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
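// As a rough sketch of how these boundaries are typically used (MyTargetISD
// and the node names are hypothetical, not part of this header): a backend
// numbers its pre-isel opcodes starting at BUILTIN_OP_END and keeps its
// memory-referencing nodes at or above FIRST_TARGET_MEMORY_OPCODE so they can
// be created with SelectionDAG::getMemIntrinsicNode.
//
//   namespace MyTargetISD {
//   enum NodeType : unsigned {
//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
//     SOME_ALU_NODE, // ordinary target node, no memory reference
//     // Nodes that reference memory must not be numbered below
//     // ISD::FIRST_TARGET_MEMORY_OPCODE.
//     SOME_LOAD_NODE = ISD::FIRST_TARGET_MEMORY_OPCODE,
//   };
//   } // namespace MyTargetISD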
1262 
1263 /// Get the underlying scalar opcode for a VECREDUCE opcode.
1264 /// For example, ISD::AND for ISD::VECREDUCE_AND.
1265 NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
1266 
1267 /// Whether this is a vector-predicated Opcode.
1268 bool isVPOpcode(unsigned Opcode);
1269 
1270 /// The operand position of the vector mask.
1271 Optional<unsigned> getVPMaskIdx(unsigned Opcode);
1272 
1273 /// The operand position of the explicit vector length parameter.
1274 Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
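// A minimal usage sketch (the surrounding combine code is hypothetical; only
// the ISD helpers above and the SDNode accessors are existing API):
//
//   if (ISD::isVPOpcode(N->getOpcode())) {
//     unsigned Opc = N->getOpcode();
//     if (Optional<unsigned> MaskIdx = ISD::getVPMaskIdx(Opc)) {
//       SDValue Mask = N->getOperand(*MaskIdx); // per-lane predicate
//     }
//     if (Optional<unsigned> EVLIdx = ISD::getVPExplicitVectorLengthIdx(Opc)) {
//       SDValue EVL = N->getOperand(*EVLIdx); // explicit vector length
//     }
//   }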
1275 
1276 //===--------------------------------------------------------------------===//
1277 /// MemIndexedMode enum - This enum defines the load / store indexed
1278 /// addressing modes.
1279 ///
1280 /// UNINDEXED    "Normal" load / store. The effective address is already
1281 ///              computed and is available in the base pointer. The offset
1282 ///              operand is always undefined. In addition to producing a
1283 ///              chain, an unindexed load produces one value (result of the
1284 ///              load); an unindexed store does not produce a value.
1285 ///
1286 /// PRE_INC      Similar to the unindexed mode, except that the effective
1287 /// PRE_DEC      address is the value of the base pointer plus / minus the
1288 ///              offset. The address computation is considered folded into the
1289 ///              load / store operation (i.e. the load / store performs the
1290 ///              address computation as well as the memory transaction). The
1291 ///              base operand is always undefined. In addition to producing a
1292 ///              chain, a pre-indexed load produces two values (the result of
1293 ///              the load and the result of the address computation); a
1294 ///              pre-indexed store produces one value (the result of the
1295 ///              address computation).
1296 ///
1297 /// POST_INC     The effective address is the value of the base pointer. The
1298 /// POST_DEC     value of the offset operand is then added to / subtracted
1299 ///              from the base after the memory transaction. In addition to
1300 ///              producing a chain, a post-indexed load produces two values
1301 ///              (the result of the load and the result of the base +/- offset
1302 ///              computation); a post-indexed store produces one value (the
1303 ///              result of the base +/- offset computation).
1304 enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
1305 
1306 static const int LAST_INDEXED_MODE = POST_DEC + 1;
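// As an illustration (pseudo-DAG, not additional normative text), a
// pre-indexed load
//   NewBase, Val, Ch = load<PRE_INC> Ch, Base, Offset
// behaves as if NewBase = Base + Offset were computed first and Val were
// loaded from NewBase, whereas the post-indexed form loads Val from Base and
// only then produces NewBase = Base + Offset.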
1307 
1308 //===--------------------------------------------------------------------===//
1309 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
1310 /// index parameter when calculating addresses.
1311 ///
1312 /// SIGNED_SCALED     Addr = Base + ((signed)Index * sizeof(element))
1313 /// SIGNED_UNSCALED   Addr = Base + (signed)Index
1314 /// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * sizeof(element))
1315 /// UNSIGNED_UNSCALED Addr = Base + (unsigned)Index
1316 enum MemIndexType {
1317   SIGNED_SCALED = 0,
1318   SIGNED_UNSCALED,
1319   UNSIGNED_SCALED,
1320   UNSIGNED_UNSCALED
1321 };
1322 
1323 static const int LAST_MEM_INDEX_TYPE = UNSIGNED_UNSCALED + 1;
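// Worked example: for i32 elements (sizeof(element) == 4) with Base = 0x1000
// and Index[i] = -2, SIGNED_SCALED gives Addr = 0x1000 + (-2 * 4) = 0xFF8,
// while UNSIGNED_UNSCALED would instead treat the same index bits as a large
// positive byte offset added directly to Base.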
1324 
1325 //===--------------------------------------------------------------------===//
1326 /// LoadExtType enum - This enum defines the three variants of LOADEXT
1327 /// (load with extension).
1328 ///
1329 /// SEXTLOAD loads the integer operand and sign extends it to a larger
1330 ///          integer result type.
1331 /// ZEXTLOAD loads the integer operand and zero extends it to a larger
1332 ///          integer result type.
1333 /// EXTLOAD  is used for two things: floating point extending loads and
1334 ///          integer extending loads [the top bits are undefined].
1335 enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };
1336 
1337 static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
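// Worked example: extending an i8 load of the byte 0x80 to i32 yields
// 0xFFFFFF80 for SEXTLOAD, 0x00000080 for ZEXTLOAD, and 0x80 in the low 8
// bits with undefined upper bits for an integer EXTLOAD.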
1338 
1339 NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
1340 
1341 //===--------------------------------------------------------------------===//
1342 /// ISD::CondCode enum - These are ordered carefully to make the bitfields
1343 /// below work out, when considering SETFALSE (something that never exists
1344 /// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
1345 /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
1346 /// to.  If the "N" column is 1, the result of the comparison is undefined if
1347 /// the input is a NAN.
1348 ///
1349 /// All of these (except for the 'always folded ops') should be handled for
1350 /// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
1351 /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
1352 ///
1353 /// Note that these are laid out in a specific order to allow bit-twiddling
1354 /// to transform conditions.
1355 enum CondCode {
1356   // Opcode       N U L G E       Intuitive operation
1357   SETFALSE, //      0 0 0 0       Always false (always folded)
1358   SETOEQ,   //      0 0 0 1       True if ordered and equal
1359   SETOGT,   //      0 0 1 0       True if ordered and greater than
1360   SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
1361   SETOLT,   //      0 1 0 0       True if ordered and less than
1362   SETOLE,   //      0 1 0 1       True if ordered and less than or equal
1363   SETONE,   //      0 1 1 0       True if ordered and operands are unequal
1364   SETO,     //      0 1 1 1       True if ordered (no nans)
1365   SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
1366   SETUEQ,   //      1 0 0 1       True if unordered or equal
1367   SETUGT,   //      1 0 1 0       True if unordered or greater than
1368   SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
1369   SETULT,   //      1 1 0 0       True if unordered or less than
1370   SETULE,   //      1 1 0 1       True if unordered, less than, or equal
1371   SETUNE,   //      1 1 1 0       True if unordered or not equal
1372   SETTRUE,  //      1 1 1 1       Always true (always folded)
1373   // Don't care operations: undefined if the input is a nan.
1374   SETFALSE2, //   1 X 0 0 0       Always false (always folded)
1375   SETEQ,     //   1 X 0 0 1       True if equal
1376   SETGT,     //   1 X 0 1 0       True if greater than
1377   SETGE,     //   1 X 0 1 1       True if greater than or equal
1378   SETLT,     //   1 X 1 0 0       True if less than
1379   SETLE,     //   1 X 1 0 1       True if less than or equal
1380   SETNE,     //   1 X 1 1 0       True if not equal
1381   SETTRUE2,  //   1 X 1 1 1       Always true (always folded)
1382 
1383   SETCC_INVALID // Marker value.
1384 };
1385 
1386 /// Return true if this is a setcc instruction that performs a signed
1387 /// comparison when used with integer operands.
1388 inline bool isSignedIntSetCC(CondCode Code) {
1389   return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
1390 }
1391 
1392 /// Return true if this is a setcc instruction that performs an unsigned
1393 /// comparison when used with integer operands.
1394 inline bool isUnsignedIntSetCC(CondCode Code) {
1395   return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
1396 }
1397 
1398 /// Return true if this is a setcc instruction that performs an equality
1399 /// comparison when used with integer operands.
1400 inline bool isIntEqualitySetCC(CondCode Code) {
1401   return Code == SETEQ || Code == SETNE;
1402 }
1403 
1404 /// Return true if the specified condition returns true if the two operands to
1405 /// the condition are equal. Note that if one of the two operands is a NaN,
1406 /// this value is meaningless.
1407 inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }
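// For example, isTrueWhenEqual(SETOEQ), isTrueWhenEqual(SETGE) and
// isTrueWhenEqual(SETULE) are all true, while isTrueWhenEqual(SETGT) is
// false; the low bit of the encoding above is the "E" column.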
1408 
1409 /// This function returns 0 if the condition is always false when an operand
1410 /// is a NaN, 1 if the condition is always true when an operand is a NaN, and
1411 /// 2 if the result of the condition is undefined when an operand is a NaN.
1412 inline unsigned getUnorderedFlavor(CondCode Cond) {
1413   return ((int)Cond >> 3) & 3;
1414 }
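// For example, getUnorderedFlavor(SETOLT) == 0 (always false on a NaN input),
// getUnorderedFlavor(SETULT) == 1 (always true on a NaN input), and
// getUnorderedFlavor(SETLT) == 2 (undefined on a NaN input); bits 3 and 4 of
// the encoding above are the "U" and "N" columns respectively.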
1415 
1416 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1417 /// SetCC operation.
1418 CondCode getSetCCInverse(CondCode Operation, EVT Type);
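// For example, the inverse of SETEQ is SETNE; for a floating-point type the
// inverse of SETOLT is SETUGE, since !(X < Y) must remain true when the
// operands are unordered.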
1419 
1420 namespace GlobalISel {
1421 /// Return the operation corresponding to !(X op Y), where 'op' is a valid
1422 /// SetCC operation. The U bit of the condition code has different meanings
1423 /// between floating point and integer comparisons and LLT's don't provide
1424 /// this distinction. As such we need to be told whether the comparison is
1425 /// floating point or integer-like. Pointers should use integer-like
1426 /// comparisons.
1427 CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
1428 } // end namespace GlobalISel
1429 
1430 /// Return the operation corresponding to (Y op X) when given the operation
1431 /// for (X op Y).
1432 CondCode getSetCCSwappedOperands(CondCode Operation);
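// For example, getSetCCSwappedOperands(SETLT) is SETGT and
// getSetCCSwappedOperands(SETULE) is SETUGE, since X < Y iff Y > X.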
1433 
1434 /// Return the result of a logical OR between different comparisons of
1435 /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
1436 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1437 CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);
1438 
1439 /// Return the result of a logical AND between different comparisons of
1440 /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
1441 /// SETCC_INVALID if it is not possible to represent the resultant comparison.
1442 CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
1443 
1444 } // namespace ISD
1445 
1446 } // namespace llvm
1447 
1448 #endif
1449