1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares codegen opcodes and related utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CODEGEN_ISDOPCODES_H
14 #define LLVM_CODEGEN_ISDOPCODES_H
15 
16 #include "llvm/CodeGen/ValueTypes.h"
17 
18 namespace llvm {
19 
20 /// ISD namespace - This namespace contains an enum which represents all of the
21 /// SelectionDAG node types and value types.
22 ///
23 namespace ISD {
24 
25 //===--------------------------------------------------------------------===//
26 /// ISD::NodeType enum - This enum defines the target-independent operators
27 /// for a SelectionDAG.
28 ///
29 /// Targets may also define target-dependent operator codes for SDNodes. For
30 /// example, on x86, these are the enum values in the X86ISD namespace.
31 /// Targets should aim to use target-independent operators to model their
32 /// instruction sets as much as possible, and only use target-dependent
33 /// operators when they have special requirements.
34 ///
/// Finally, during and after selection proper, SDNodes may use special
36 /// operator codes that correspond directly with MachineInstr opcodes. These
37 /// are used to represent selected instructions. See the isMachineOpcode()
38 /// and getMachineOpcode() member functions of SDNode.
39 ///
40 enum NodeType {
41 
42   /// DELETED_NODE - This is an illegal value that is used to catch
43   /// errors.  This opcode is not a legal opcode for any node.
44   DELETED_NODE,
45 
46   /// EntryToken - This is the marker used to indicate the start of a region.
47   EntryToken,
48 
49   /// TokenFactor - This node takes multiple tokens as input and produces a
50   /// single token result. This is used to represent the fact that the operand
51   /// operators are independent of each other.
52   TokenFactor,
53 
  /// AssertSext, AssertZext - These nodes record if a register contains a
  /// value that has already been zero or sign extended from a narrower type.
  /// These nodes take two operands.  The first is the node that has already
  /// been extended, and the second is a value type node indicating the width
  /// of the extension.
  AssertSext,
  AssertZext,

  /// AssertAlign - This node records that its operand is known to have at
  /// least the alignment carried by the node (see AssertAlignSDNode).
  AssertAlign,
62 
63   /// Various leaf nodes.
64   BasicBlock,
65   VALUETYPE,
66   CONDCODE,
67   Register,
68   RegisterMask,
69   Constant,
70   ConstantFP,
71   GlobalAddress,
72   GlobalTLSAddress,
73   FrameIndex,
74   JumpTable,
75   ConstantPool,
76   ExternalSymbol,
77   BlockAddress,
78 
79   /// The address of the GOT
80   GLOBAL_OFFSET_TABLE,
81 
82   /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
83   /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
84   /// of the frame or return address to return.  An index of zero corresponds
85   /// to the current function's frame or return address, an index of one to
86   /// the parent's frame or return address, and so on.
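  ///
  /// A hedged construction sketch (DAG, DL and PtrVT are assumed locals from
  /// a target's lowering code, not defined in this header); depth zero
  /// requests the current function's return address:
  ///   SDValue RA = DAG.getNode(ISD::RETURNADDR, DL, PtrVT,
  ///                            DAG.getConstant(0, DL, MVT::i32));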
87   FRAMEADDR,
88   RETURNADDR,
89 
90   /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
91   /// This node takes no operand, returns a target-specific pointer to the
92   /// place in the stack frame where the return address of the current
93   /// function is stored.
94   ADDROFRETURNADDR,
95 
96   /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
97   /// and returns the stack pointer value at the entry of the current
98   /// function calling this intrinsic.
99   SPONENTRY,
100 
101   /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
102   /// Materializes the offset from the local object pointer of another
103   /// function to a particular local object passed to llvm.localescape. The
104   /// operand is the MCSymbol label used to represent this offset, since
105   /// typically the offset is not known until after code generation of the
106   /// parent.
107   LOCAL_RECOVER,
108 
  /// READ_REGISTER, WRITE_REGISTER - These nodes represent the
  /// llvm.read_register and llvm.write_register intrinsics on the DAG, which
  /// implement the named register global variables extension.
111   READ_REGISTER,
112   WRITE_REGISTER,
113 
114   /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
115   /// first (possible) on-stack argument. This is needed for correct stack
116   /// adjustment during unwind.
117   FRAME_TO_ARGS_OFFSET,
118 
119   /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
120   /// Frame Address (CFA), generally the value of the stack pointer at the
121   /// call site in the previous frame.
122   EH_DWARF_CFA,
123 
124   /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
  /// the 'eh_return' GCC DWARF builtin, which is used to return from an
  /// exception handler. The general meaning is: adjust the stack by OFFSET
  /// and pass execution to HANDLER. Many platform-specific details apply as
  /// well.
128   EH_RETURN,
129 
130   /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
131   /// This corresponds to the eh.sjlj.setjmp intrinsic.
  /// It takes an input chain and a pointer to the jump buffer as inputs
  /// and returns a result value and an output chain.
134   EH_SJLJ_SETJMP,
135 
136   /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
137   /// This corresponds to the eh.sjlj.longjmp intrinsic.
138   /// It takes an input chain and a pointer to the jump buffer as inputs
139   /// and returns an outchain.
140   EH_SJLJ_LONGJMP,
141 
142   /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
143   /// The target initializes the dispatch table here.
144   EH_SJLJ_SETUP_DISPATCH,
145 
146   /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
147   /// simplification, or lowering of the constant. They are used for constants
148   /// which are known to fit in the immediate fields of their users, or for
149   /// carrying magic numbers which are not values which need to be
150   /// materialized in registers.
151   TargetConstant,
152   TargetConstantFP,
153 
154   /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
155   /// anything else with this node, and this is valid in the target-specific
156   /// dag, turning into a GlobalAddress operand.
157   TargetGlobalAddress,
158   TargetGlobalTLSAddress,
159   TargetFrameIndex,
160   TargetJumpTable,
161   TargetConstantPool,
162   TargetExternalSymbol,
163   TargetBlockAddress,
164 
165   MCSymbol,
166 
167   /// TargetIndex - Like a constant pool entry, but with completely
168   /// target-dependent semantics. Holds target flags, a 32-bit index, and a
169   /// 64-bit index. Targets can use this however they like.
170   TargetIndex,
171 
172   /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
173   /// This node represents a target intrinsic function with no side effects.
174   /// The first operand is the ID number of the intrinsic from the
175   /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
176   /// node returns the result of the intrinsic.
177   INTRINSIC_WO_CHAIN,
178 
179   /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
180   /// This node represents a target intrinsic function with side effects that
181   /// returns a result.  The first operand is a chain pointer.  The second is
182   /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
183   /// operands to the intrinsic follow.  The node has two results, the result
184   /// of the intrinsic and an output chain.
185   INTRINSIC_W_CHAIN,
186 
187   /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
188   /// This node represents a target intrinsic function with side effects that
189   /// does not return a result.  The first operand is a chain pointer.  The
190   /// second is the ID number of the intrinsic from the llvm::Intrinsic
191   /// namespace.  The operands to the intrinsic follow.
192   INTRINSIC_VOID,
193 
194   /// CopyToReg - This node has three operands: a chain, a register number to
195   /// set to this value, and a value.
196   CopyToReg,
197 
198   /// CopyFromReg - This node indicates that the input value is a virtual or
199   /// physical register that is defined outside of the scope of this
200   /// SelectionDAG.  The register is available from the RegisterSDNode object.
201   CopyFromReg,
202 
203   /// UNDEF - An undefined node.
204   UNDEF,
205 
  /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
  /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
  /// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
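  /// For example, if X = FREEZE(UNDEF), then SUB(X, X) is guaranteed to be
  /// zero, whereas SUB(UNDEF, UNDEF) may evaluate to any value since each
  /// use of UNDEF may be a different value.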
209   FREEZE,
210 
211   /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
212   /// a Constant, which is required to be operand #1) half of the integer or
213   /// float value specified as operand #0.  This is only for use before
214   /// legalization, for values that will be broken into multiple registers.
215   EXTRACT_ELEMENT,
216 
217   /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
218   /// Given two values of the same integer value type, this produces a value
219   /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
220   /// legalization. The lower part of the composite value should be in
221   /// element 0 and the upper part should be in element 1.
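  ///
  /// A hedged sketch of the usual pairing with EXTRACT_ELEMENT (DAG, DL and
  /// X are assumed locals from legalization code): split an i64 into i32
  /// halves and rejoin them:
  ///   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, X,
  ///                            DAG.getIntPtrConstant(0, DL));
  ///   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, X,
  ///                            DAG.getIntPtrConstant(1, DL));
  ///   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);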
222   BUILD_PAIR,
223 
224   /// MERGE_VALUES - This node takes multiple discrete operands and returns
  /// them all as its individual results.  This node has exactly the same
226   /// number of inputs and outputs. This node is useful for some pieces of the
227   /// code generator that want to think about a single node with multiple
228   /// results, not multiple nodes.
229   MERGE_VALUES,
230 
231   /// Simple integer binary arithmetic operators.
232   ADD,
233   SUB,
234   MUL,
235   SDIV,
236   UDIV,
237   SREM,
238   UREM,
239 
240   /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
241   /// a signed/unsigned value of type i[2*N], and return the full value as
242   /// two results, each of type iN.
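  ///
  /// A hedged sketch of building the two-result form (DAG, DL, VT, LHS and
  /// RHS are assumed locals):
  ///   SDValue M = DAG.getNode(ISD::UMUL_LOHI, DL, DAG.getVTList(VT, VT),
  ///                           LHS, RHS);
  ///   SDValue Lo = M.getValue(0), Hi = M.getValue(1);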
243   SMUL_LOHI,
244   UMUL_LOHI,
245 
246   /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
247   /// remainder result.
248   SDIVREM,
249   UDIVREM,
250 
251   /// CARRY_FALSE - This node is used when folding other nodes,
252   /// like ADDC/SUBC, which indicate the carry result is always false.
253   CARRY_FALSE,
254 
255   /// Carry-setting nodes for multiple precision addition and subtraction.
256   /// These nodes take two operands of the same value type, and produce two
257   /// results.  The first result is the normal add or sub result, the second
258   /// result is the carry flag result.
259   /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
260   /// They are kept around for now to provide a smooth transition path
261   /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
262   ADDC,
263   SUBC,
264 
265   /// Carry-using nodes for multiple precision addition and subtraction. These
266   /// nodes take three operands: The first two are the normal lhs and rhs to
267   /// the add or sub, and the third is the input carry flag.  These nodes
268   /// produce two results; the normal result of the add or sub, and the output
  /// carry flag.  These nodes both read and write a carry flag to allow them
  /// to be chained together for add and sub of arbitrarily large values.
272   ADDE,
273   SUBE,
274 
275   /// Carry-using nodes for multiple precision addition and subtraction.
276   /// These nodes take three operands: The first two are the normal lhs and
277   /// rhs to the add or sub, and the third is a boolean indicating if there
278   /// is an incoming carry. These nodes produce two results: the normal
279   /// result of the add or sub, and the output carry so they can be chained
  /// together. The use of this opcode is preferable to ADDE/SUBE if the
  /// target supports it, as the carry is a regular value rather than a
  /// glue, which allows further optimization.
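  ///
  /// A hedged sketch of chaining a double-wide addition out of narrow halves
  /// (DAG, DL, NVT, BoolVT and the Lo/Hi operands are assumed locals):
  ///   SDVTList VTs = DAG.getVTList(NVT, BoolVT);
  ///   SDValue Lo = DAG.getNode(ISD::UADDO, DL, VTs, LHSLo, RHSLo);
  ///   SDValue Hi = DAG.getNode(ISD::ADDCARRY, DL, VTs, LHSHi, RHSHi,
  ///                            Lo.getValue(1));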
283   ADDCARRY,
284   SUBCARRY,
285 
286   /// Carry-using overflow-aware nodes for multiple precision addition and
287   /// subtraction. These nodes take three operands: The first two are normal lhs
288   /// and rhs to the add or sub, and the third is a boolean indicating if there
289   /// is an incoming carry. They produce two results: the normal result of the
  /// add or sub, and a boolean that indicates if an overflow occurred (*not*
  /// a flag, because it may be stored to memory, etc.). If the type of the
292   /// boolean is not i1 then the high bits conform to getBooleanContents.
293   SADDO_CARRY,
294   SSUBO_CARRY,
295 
296   /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
297   /// These nodes take two operands: the normal LHS and RHS to the add. They
298   /// produce two results: the normal result of the add, and a boolean that
  /// indicates if an overflow occurred (*not* a flag, because it may be
  /// stored to memory, etc.).  If the type of the boolean is not i1 then the
  /// high bits conform to getBooleanContents.
302   /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
303   SADDO,
304   UADDO,
305 
306   /// Same for subtraction.
307   SSUBO,
308   USUBO,
309 
310   /// Same for multiplication.
311   SMULO,
312   UMULO,
313 
314   /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
315   /// integers with the same bit width (W). If the true value of LHS + RHS
316   /// exceeds the largest value that can be represented by W bits, the
317   /// resulting value is this maximum value. Otherwise, if this value is less
318   /// than the smallest value that can be represented by W bits, the
319   /// resulting value is this minimum value.
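  /// For example, for i8 (W = 8): SADDSAT(100, 100) = 127,
  /// SADDSAT(-100, -100) = -128, and UADDSAT(200, 100) = 255.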
320   SADDSAT,
321   UADDSAT,
322 
323   /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
324   /// integers with the same bit width (W). If the true value of LHS - RHS
325   /// exceeds the largest value that can be represented by W bits, the
326   /// resulting value is this maximum value. Otherwise, if this value is less
327   /// than the smallest value that can be represented by W bits, the
328   /// resulting value is this minimum value.
329   SSUBSAT,
330   USUBSAT,
331 
332   /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
333   /// operand is the value to be shifted, and the second argument is the amount
334   /// to shift by. Both must be integers of the same bit width (W). If the true
335   /// value of LHS << RHS exceeds the largest value that can be represented by
  /// W bits, the resulting value is this maximum value. Otherwise, if this
337   /// value is less than the smallest value that can be represented by W bits,
338   /// the resulting value is this minimum value.
339   SSHLSAT,
340   USHLSAT,
341 
  /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point
  /// multiplication on 2 integers with the same width and scale. SCALE
  /// represents the scale of both operands as fixed point numbers. This
  /// SCALE parameter must be a constant integer. A scale of zero is
  /// effectively performing multiplication on 2 integers.
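  /// For example, with W = 8 and SCALE = 4, the operands 0x18 (1.5) and
  /// 0x20 (2.0) produce 0x30 (3.0); when the product is exactly
  /// representable the result is (LHS * RHS) >> SCALE.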
348   SMULFIX,
349   UMULFIX,
350 
351   /// Same as the corresponding unsaturated fixed point instructions, but the
352   /// result is clamped between the min and max values representable by the
353   /// bits of the first 2 operands.
354   SMULFIXSAT,
355   UMULFIXSAT,
356 
357   /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
358   /// 2 integers with the same width and scale. SCALE represents the scale
359   /// of both operands as fixed point numbers. This SCALE parameter must be a
360   /// constant integer.
361   SDIVFIX,
362   UDIVFIX,
363 
364   /// Same as the corresponding unsaturated fixed point instructions, but the
365   /// result is clamped between the min and max values representable by the
366   /// bits of the first 2 operands.
367   SDIVFIXSAT,
368   UDIVFIXSAT,
369 
370   /// Simple binary floating point operators.
371   FADD,
372   FSUB,
373   FMUL,
374   FDIV,
375   FREM,
376 
377   /// Constrained versions of the binary floating point operators.
378   /// These will be lowered to the simple operators before final selection.
379   /// They are used to limit optimizations while the DAG is being
380   /// optimized.
381   STRICT_FADD,
382   STRICT_FSUB,
383   STRICT_FMUL,
384   STRICT_FDIV,
385   STRICT_FREM,
386   STRICT_FMA,
387 
388   /// Constrained versions of libm-equivalent floating point intrinsics.
389   /// These will be lowered to the equivalent non-constrained pseudo-op
390   /// (or expanded to the equivalent library call) before final selection.
391   /// They are used to limit optimizations while the DAG is being optimized.
392   STRICT_FSQRT,
393   STRICT_FPOW,
394   STRICT_FPOWI,
395   STRICT_FSIN,
396   STRICT_FCOS,
397   STRICT_FEXP,
398   STRICT_FEXP2,
399   STRICT_FLOG,
400   STRICT_FLOG10,
401   STRICT_FLOG2,
402   STRICT_FRINT,
403   STRICT_FNEARBYINT,
404   STRICT_FMAXNUM,
405   STRICT_FMINNUM,
406   STRICT_FCEIL,
407   STRICT_FFLOOR,
408   STRICT_FROUND,
409   STRICT_FROUNDEVEN,
410   STRICT_FTRUNC,
411   STRICT_LROUND,
412   STRICT_LLROUND,
413   STRICT_LRINT,
414   STRICT_LLRINT,
415   STRICT_FMAXIMUM,
416   STRICT_FMINIMUM,
417 
418   /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
419   /// unsigned integer. These have the same semantics as fptosi and fptoui
420   /// in IR.
421   /// They are used to limit optimizations while the DAG is being optimized.
422   STRICT_FP_TO_SINT,
423   STRICT_FP_TO_UINT,
424 
425   /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
426   /// a floating point value. These have the same semantics as sitofp and
427   /// uitofp in IR.
428   /// They are used to limit optimizations while the DAG is being optimized.
429   STRICT_SINT_TO_FP,
430   STRICT_UINT_TO_FP,
431 
432   /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
433   /// point type down to the precision of the destination VT.  TRUNC is a
434   /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
  /// this is a normal rounding; if it is 1, this FP_ROUND is known to not
436   /// change the value of Y.
437   ///
438   /// The TRUNC = 1 case is used in cases where we know that the value will
439   /// not be modified by the node, because Y is not using any of the extra
440   /// precision of source type.  This allows certain transformations like
441   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
442   /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
443   /// removed.
444   /// It is used to limit optimizations while the DAG is being optimized.
445   STRICT_FP_ROUND,
446 
447   /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
448   /// type.
449   /// It is used to limit optimizations while the DAG is being optimized.
450   STRICT_FP_EXTEND,
451 
452   /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
453   /// for floating-point operands only.  STRICT_FSETCC performs a quiet
454   /// comparison operation, while STRICT_FSETCCS performs a signaling
455   /// comparison operation.
456   STRICT_FSETCC,
457   STRICT_FSETCCS,
458 
459   /// FMA - Perform a * b + c with no intermediate rounding step.
460   FMA,
461 
462   /// FMAD - Perform a * b + c, while getting the same result as the
463   /// separately rounded operations.
464   FMAD,
465 
466   /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
467   /// DAG node does not require that X and Y have the same type, just that
468   /// they are both floating point.  X and the result must have the same type.
469   /// FCOPYSIGN(f32, f64) is allowed.
470   FCOPYSIGN,
471 
472   /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
473   /// value as an integer 0/1 value.
474   FGETSIGN,
475 
476   /// Returns platform specific canonical encoding of a floating point number.
477   FCANONICALIZE,
478 
479   /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
480   /// with the specified, possibly variable, elements. The types of the
481   /// operands must match the vector element type, except that integer types
482   /// are allowed to be larger than the element type, in which case the
483   /// operands are implicitly truncated. The types of the operands must all
484   /// be the same.
485   BUILD_VECTOR,
486 
487   /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
488   /// at IDX replaced with VAL. If the type of VAL is larger than the vector
489   /// element type then VAL is truncated before replacement.
490   ///
491   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
492   /// vector width. IDX is not first scaled by the runtime scaling factor of
493   /// VECTOR.
494   INSERT_VECTOR_ELT,
495 
496   /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
497   /// identified by the (potentially variable) element number IDX. If the return
498   /// type is an integer type larger than the element type of the vector, the
499   /// result is extended to the width of the return type. In that case, the high
500   /// bits are undefined.
501   ///
502   /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
503   /// vector width. IDX is not first scaled by the runtime scaling factor of
504   /// VECTOR.
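  ///
  /// A hedged construction sketch (DAG, DL, EltVT, Vec and Idx are assumed
  /// locals from lowering code):
  ///   SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
  ///                             DAG.getVectorIdxConstant(Idx, DL));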
505   EXTRACT_VECTOR_ELT,
506 
507   /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
508   /// vector type with the same length and element type, this produces a
509   /// concatenated vector result value, with length equal to the sum of the
510   /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
511   /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
512   /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
513   CONCAT_VECTORS,
514 
  /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
  /// inserted into VECTOR1. IDX represents the starting element number at
  /// which VECTOR2 will be inserted. Let the type of VECTOR2 be T; IDX must
  /// be a constant multiple of T's known minimum vector length, and if T is
  /// a scalable vector, IDX is first scaled by the runtime scaling factor of
  /// T.
520   /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
521   /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
522   /// indices. If this condition cannot be determined statically but is false at
523   /// runtime, then the result vector is undefined. The IDX parameter must be a
524   /// vector index constant type, which for most targets will be an integer
525   /// pointer type.
526   ///
527   /// This operation supports inserting a fixed-width vector into a scalable
528   /// vector, but not the other way around.
529   INSERT_SUBVECTOR,
530 
531   /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
532   /// Let the result type be T, then IDX represents the starting element number
533   /// from which a subvector of type T is extracted. IDX must be a constant
534   /// multiple of T's known minimum vector length. If T is a scalable vector,
535   /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
536   /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
537   /// condition cannot be determined statically but is false at runtime, then
538   /// the result vector is undefined. The IDX parameter must be a vector index
539   /// constant type, which for most targets will be an integer pointer type.
540   ///
541   /// This operation supports extracting a fixed-width vector from a scalable
542   /// vector, but not the other way around.
543   EXTRACT_SUBVECTOR,
544 
545   /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
546   /// whose elements are shuffled using the following algorithm:
547   ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
548   VECTOR_REVERSE,
549 
550   /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
551   /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
552   /// values that indicate which value (or undef) each result element will
553   /// get.  These constant ints are accessible through the
554   /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
555   /// 'vperm' instruction, except that the indices must be constants and are
556   /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
557   VECTOR_SHUFFLE,
558 
559   /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
560   /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
561   /// Let the result type be T, if IMM is positive it represents the starting
562   /// element number (an index) from which a subvector of type T is extracted
563   /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
564   /// specifying the number of trailing elements to extract from VEC1, where the
565   /// elements of T are selected using the following algorithm:
566   ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
567   /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
568   /// is a constant integer.
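  /// For example, with VEC1 = <A, B, C, D> and VEC2 = <E, F, G, H>:
  ///   VECTOR_SPLICE(VEC1, VEC2, 2)  produces <C, D, E, F>
  ///   VECTOR_SPLICE(VEC1, VEC2, -1) produces <D, E, F, G>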
569   VECTOR_SPLICE,
570 
571   /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
572   /// scalar value into element 0 of the resultant vector type.  The top
573   /// elements 1 to N-1 of the N-element vector are undefined.  The type
574   /// of the operand must match the vector element type, except when they
575   /// are integer types.  In this case the operand is allowed to be wider
576   /// than the vector element type, and is implicitly truncated to it.
577   SCALAR_TO_VECTOR,
578 
579   /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
580   /// duplicated in all lanes. The type of the operand must match the vector
581   /// element type, except when they are integer types.  In this case the
582   /// operand is allowed to be wider than the vector element type, and is
583   /// implicitly truncated to it.
584   SPLAT_VECTOR,
585 
586   /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
587   /// scalar values joined together and then duplicated in all lanes. This
588   /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
589   /// allows representing a 64-bit splat on a target with 32-bit integers. The
590   /// total width of the scalars must cover the element width. SCALAR1 contains
591   /// the least significant bits of the value regardless of endianness and all
592   /// scalars should have the same type.
593   SPLAT_VECTOR_PARTS,
594 
595   /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
596   /// of a linear sequence of unsigned values starting from 0 with a step of
597   /// IMM, where IMM must be a vector index constant integer value which must
598   /// fit in the vector element type.
599   /// Note that IMM may be a smaller type than the vector element type, in
600   /// which case the step is implicitly sign-extended to the vector element
601   /// type. IMM may also be a larger type than the vector element type, in
602   /// which case the step is implicitly truncated to the vector element type.
603   /// The operation does not support returning fixed-width vectors or
604   /// non-constant operands. If the sequence value exceeds the limit allowed
605   /// for the element type then the values for those lanes are undefined.
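  /// For example, STEP_VECTOR(2) of type nxv4i32 yields
  /// <0, 2, 4, 6, 8, ...>, with the total lane count determined at runtime.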
606   STEP_VECTOR,
607 
608   /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
609   /// producing an unsigned/signed value of type i[2*N], then return the top
610   /// part.
611   MULHU,
612   MULHS,
613 
614   /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
615   /// integers.
616   SMIN,
617   SMAX,
618   UMIN,
619   UMAX,
620 
621   /// Bitwise operators - logical and, logical or, logical xor.
622   AND,
623   OR,
624   XOR,
625 
626   /// ABS - Determine the unsigned absolute value of a signed integer value of
627   /// the same bitwidth.
628   /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
629   /// is performed.
630   ABS,
631 
632   /// Shift and rotation operations.  After legalization, the type of the
633   /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
634   /// the shift amount can be any type, but care must be taken to ensure it is
635   /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
636   /// legalization, types like i1024 can occur and i8 doesn't have enough bits
637   /// to represent the shift amount.
638   /// When the 1st operand is a vector, the shift amount must be in the same
639   /// type. (TLI.getShiftAmountTy() will return the same type when the input
640   /// type is a vector.)
641   /// For rotates and funnel shifts, the shift amount is treated as an unsigned
642   /// amount modulo the element size of the first operand.
643   ///
644   /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
645   /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
646   /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
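  /// For example, for i8, fshl(0xAB, 0xF0, 4) = (0xAB << 4) | (0xF0 >> 4)
  /// = 0xB0 | 0x0F = 0xBF. A rotate is the special case where both inputs
  /// are the same value, e.g. ROTL(X, Z) == FSHL(X, X, Z).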
647   SHL,
648   SRA,
649   SRL,
650   ROTL,
651   ROTR,
652   FSHL,
653   FSHR,
654 
655   /// Byte Swap and Counting operators.
656   BSWAP,
657   CTTZ,
658   CTLZ,
659   CTPOP,
660   BITREVERSE,
661   PARITY,
662 
663   /// Bit counting operators with an undefined result for zero inputs.
664   CTTZ_ZERO_UNDEF,
665   CTLZ_ZERO_UNDEF,
666 
667   /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
668   /// i1 then the high bits must conform to getBooleanContents.
669   SELECT,
670 
671   /// Select with a vector condition (op #0) and two vector operands (ops #1
672   /// and #2), returning a vector result.  All vectors have the same length.
673   /// Much like the scalar select and setcc, each bit in the condition selects
674   /// whether the corresponding result element is taken from op #1 or op #2.
675   /// At first, the VSELECT condition is of vXi1 type. Later, targets may
676   /// change the condition type in order to match the VSELECT node using a
677   /// pattern. The condition follows the BooleanContent format of the target.
678   VSELECT,
679 
680   /// Select with condition operator - This selects between a true value and
681   /// a false value (ops #2 and #3) based on the boolean result of comparing
682   /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
683   /// condition code in op #4, a CondCodeSDNode.
684   SELECT_CC,
685 
686   /// SetCC operator - This evaluates to a true value iff the condition is
687   /// true.  If the result value type is not i1 then the high bits conform
688   /// to getBooleanContents.  The operands to this are the left and right
689   /// operands to compare (ops #0, and #1) and the condition code to compare
690   /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
691   /// then the result type must also be a vector type.
692   SETCC,
693 
694   /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
695   /// op #2 is a boolean indicating if there is an incoming carry. This
696   /// operator checks the result of "LHS - RHS - Carry", and can be used to
697   /// compare two wide integers:
698   /// (setcccarry lhshi rhshi (subcarry lhslo rhslo) cc).
699   /// Only valid for integers.
700   SETCCCARRY,
701 
702   /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
703   /// integer shift operations.  The operation ordering is:
704   ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
705   SHL_PARTS,
706   SRA_PARTS,
707   SRL_PARTS,
708 
709   /// Conversion operators.  These are all single input single output
710   /// operations.  For all of these, the result type must be strictly
711   /// wider or narrower (depending on the operation) than the source
712   /// type.
713 
714   /// SIGN_EXTEND - Used for integer types, replicating the sign bit
715   /// into new bits.
716   SIGN_EXTEND,
717 
718   /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
719   ZERO_EXTEND,
720 
721   /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
722   ANY_EXTEND,
723 
724   /// TRUNCATE - Completely drop the high bits.
725   TRUNCATE,
726 
727   /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
728   /// depends on the first letter) to floating point.
729   SINT_TO_FP,
730   UINT_TO_FP,
731 
732   /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
733   /// sign extend a small value in a large integer register (e.g. sign
734   /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
  /// with the 7th bit).  The size of the smaller type is indicated by
  /// operand #1, a ValueType node.
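  /// For example, SIGN_EXTEND_INREG(X:i32, i8) is equivalent to
  /// SRA(SHL(X, 24), 24), the shift pair that replicates bit 7 of X into
  /// bits 8-31.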
737   SIGN_EXTEND_INREG,
738 
739   /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
740   /// in-register any-extension of the low lanes of an integer vector. The
741   /// result type must have fewer elements than the operand type, and those
742   /// elements must be larger integer types such that the total size of the
743   /// operand type is less than or equal to the size of the result type. Each
744   /// of the low operand elements is any-extended into the corresponding,
745   /// wider result elements with the high bits becoming undef.
746   /// NOTE: The type legalizer prefers to make the operand and result size
747   /// the same to allow expansion to shuffle vector during op legalization.
748   ANY_EXTEND_VECTOR_INREG,
749 
750   /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
751   /// in-register sign-extension of the low lanes of an integer vector. The
752   /// result type must have fewer elements than the operand type, and those
753   /// elements must be larger integer types such that the total size of the
754   /// operand type is less than or equal to the size of the result type. Each
755   /// of the low operand elements is sign-extended into the corresponding,
756   /// wider result elements.
757   /// NOTE: The type legalizer prefers to make the operand and result size
758   /// the same to allow expansion to shuffle vector during op legalization.
759   SIGN_EXTEND_VECTOR_INREG,
760 
761   /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
762   /// in-register zero-extension of the low lanes of an integer vector. The
763   /// result type must have fewer elements than the operand type, and those
764   /// elements must be larger integer types such that the total size of the
765   /// operand type is less than or equal to the size of the result type. Each
766   /// of the low operand elements is zero-extended into the corresponding,
767   /// wider result elements.
768   /// NOTE: The type legalizer prefers to make the operand and result size
769   /// the same to allow expansion to shuffle vector during op legalization.
770   ZERO_EXTEND_VECTOR_INREG,
771 
772   /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
773   /// integer. These have the same semantics as fptosi and fptoui in IR. If
774   /// the FP value cannot fit in the integer type, the results are undefined.
775   FP_TO_SINT,
776   FP_TO_UINT,
777 
778   /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
779   /// signed or unsigned scalar integer type given in operand 1 with the
780   /// following semantics:
781   ///
782   ///  * If the value is NaN, zero is returned.
783   ///  * If the value is larger/smaller than the largest/smallest integer,
784   ///    the largest/smallest integer is returned (saturation).
785   ///  * Otherwise the result of rounding the value towards zero is returned.
786   ///
787   /// The scalar width of the type given in operand 1 must be equal to, or
788   /// smaller than, the scalar result type width. It may end up being smaller
789   /// than the result width as a result of integer type legalization.
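  /// For example, converting f32 to i8: FP_TO_SINT_SAT(300.25) = 127,
  /// FP_TO_SINT_SAT(-1.75) = -1, and FP_TO_SINT_SAT(NaN) = 0.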
790   FP_TO_SINT_SAT,
791   FP_TO_UINT_SAT,
792 
793   /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
794   /// down to the precision of the destination VT.  TRUNC is a flag, which is
795   /// always an integer that is zero or one.  If TRUNC is 0, this is a
  /// normal rounding; if it is 1, this FP_ROUND is known to not change the
797   /// value of Y.
798   ///
799   /// The TRUNC = 1 case is used in cases where we know that the value will
800   /// not be modified by the node, because Y is not using any of the extra
801   /// precision of source type.  This allows certain transformations like
802   /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
803   /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
804   FP_ROUND,
805 
806   /// Returns current rounding mode:
807   /// -1 Undefined
808   ///  0 Round to 0
809   ///  1 Round to nearest, ties to even
810   ///  2 Round to +inf
811   ///  3 Round to -inf
812   ///  4 Round to nearest, ties to zero
813   /// Result is rounding mode and chain. Input is a chain.
814   /// TODO: Rename this node to GET_ROUNDING.
815   FLT_ROUNDS_,
816 
817   /// Set rounding mode.
818   /// The first operand is a chain pointer. The second specifies the required
819   /// rounding mode, encoded in the same way as used in '``FLT_ROUNDS_``'.
820   SET_ROUNDING,
821 
822   /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
823   FP_EXTEND,
824 
825   /// BITCAST - This operator converts between integer, vector and FP
826   /// values, as if the value was stored to memory with one type and loaded
827   /// from the same address with the other type (or equivalently for vector
828   /// format conversions, etc).  The source and result are required to have
829   /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
830   /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
831   /// getNode().
832   ///
833   /// This operator is subtly different from the bitcast instruction from
834   /// LLVM-IR since this node may change the bits in the register. For
835   /// example, this occurs on big-endian NEON and big-endian MSA where the
836   /// layout of the bits in the register depends on the vector type and this
837   /// operator acts as a shuffle operation for some vector type combinations.
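  ///
  /// For example, BITCAST of the f32 constant 1.0 to i32 yields 0x3F800000,
  /// the IEEE-754 single-precision encoding of 1.0.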
838   BITCAST,
839 
840   /// ADDRSPACECAST - This operator converts between pointers of different
841   /// address spaces.
842   ADDRSPACECAST,
843 
844   /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
  /// and truncation for half-precision (16 bit) floating point numbers. These
  /// nodes form a semi-softened interface for dealing with f16 (as an i16),
  /// which is often a storage-only type but has native conversions.
848   FP16_TO_FP,
849   FP_TO_FP16,
850   STRICT_FP16_TO_FP,
851   STRICT_FP_TO_FP16,
852 
853   /// Perform various unary floating-point operations inspired by libm. For
  /// FPOWI, the result is undefined if the integer operand doesn't fit
855   /// into 32 bits.
856   FNEG,
857   FABS,
858   FSQRT,
859   FCBRT,
860   FSIN,
861   FCOS,
862   FPOWI,
863   FPOW,
864   FLOG,
865   FLOG2,
866   FLOG10,
867   FEXP,
868   FEXP2,
869   FCEIL,
870   FTRUNC,
871   FRINT,
872   FNEARBYINT,
873   FROUND,
874   FROUNDEVEN,
875   FFLOOR,
876   LROUND,
877   LLROUND,
878   LRINT,
879   LLRINT,
880 
881   /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
882   /// values.
  ///
884   /// In the case where a single input is a NaN (either signaling or quiet),
885   /// the non-NaN input is returned.
886   ///
887   /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
888   FMINNUM,
889   FMAXNUM,
890 
891   /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
892   /// two values, following the IEEE-754 2008 definition. This differs from
893   /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
894   /// signaling NaN, returns a quiet NaN.
895   FMINNUM_IEEE,
896   FMAXNUM_IEEE,
897 
898   /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
899   /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
900   /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
901   FMINIMUM,
902   FMAXIMUM,
903 
904   /// FSINCOS - Compute both fsin and fcos as a single operation.
905   FSINCOS,
906 
907   /// LOAD and STORE have token chains as their first operand, then the same
908   /// operands as an LLVM load/store instruction, then an offset node that
909   /// is added / subtracted from the base pointer to form the address (for
910   /// indexed memory ops).
911   LOAD,
912   STORE,
913 
914   /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
915   /// to a specified boundary.  This node always has two return values: a new
916   /// stack pointer value and a chain. The first operand is the token chain,
917   /// the second is the number of bytes to allocate, and the third is the
918   /// alignment boundary.  The size is guaranteed to be a multiple of the
919   /// stack alignment, and the alignment is guaranteed to be bigger than the
920   /// stack alignment (if required) or 0 to get standard stack alignment.
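  ///
  /// A hedged construction sketch (DAG, DL, Chain, Size, PtrVT and
  /// AlignInBytes are assumed locals from lowering code):
  ///   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  ///   SDValue Ops[] = {Chain, Size, DAG.getConstant(AlignInBytes, DL, PtrVT)};
  ///   SDValue Alloc = DAG.getNode(ISD::DYNAMIC_STACKALLOC, DL, VTs, Ops);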
921   DYNAMIC_STACKALLOC,
922 
923   /// Control flow instructions.  These all have token chains.
924 
925   /// BR - Unconditional branch.  The first operand is the chain
926   /// operand, the second is the MBB to branch to.
927   BR,
928 
929   /// BRIND - Indirect branch.  The first operand is the chain, the second
930   /// is the value to branch to, which must be of the same type as the
931   /// target's pointer type.
932   BRIND,
933 
934   /// BR_JT - Jumptable branch. The first operand is the chain, the second
935   /// is the jumptable index, the last one is the jumptable entry index.
936   BR_JT,
937 
938   /// BRCOND - Conditional branch.  The first operand is the chain, the
939   /// second is the condition, the third is the block to branch to if the
940   /// condition is true.  If the type of the condition is not i1, then the
941   /// high bits must conform to getBooleanContents. If the condition is undef,
942   /// it nondeterministically jumps to the block.
943   /// TODO: Its semantics w.r.t undef requires further discussion; we need to
944   /// make it sure that it is consistent with optimizations in MIR & the
945   /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
946   BRCOND,
947 
948   /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
949   /// that the condition is represented as condition code, and two nodes to
950   /// compare, rather than as a combined SetCC node.  The operands in order
951   /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
952   /// condition is undef, it nondeterministically jumps to the block.
953   BR_CC,
954 
955   /// INLINEASM - Represents an inline asm block.  This node always has two
956   /// return values: a chain and a flag result.  The inputs are as follows:
957   ///   Operand #0  : Input chain.
958   ///   Operand #1  : a ExternalSymbolSDNode with a pointer to the asm string.
959   ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
960   ///   Operand #3  : HasSideEffect, IsAlignStack bits.
961   ///   After this, it is followed by a list of operands with this format:
  ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
  ///                     number of operands that follow, etc.  See InlineAsm.h.
964   ///     ... however many operands ...
965   ///   Operand #last: Optional, an incoming flag.
966   ///
967   /// The variable width operands are required to represent target addressing
968   /// modes as a single "operand", even though they may have multiple
969   /// SDOperands.
970   INLINEASM,
971 
972   /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
973   INLINEASM_BR,
974 
  /// EH_LABEL - Represents a label in the middle of a basic block, used to
  /// track locations needed for debug and exception handling tables.  These
  /// nodes take a chain as input and return a chain.
978   EH_LABEL,
979 
980   /// ANNOTATION_LABEL - Represents a mid basic block label used by
981   /// annotations. This should remain within the basic block and be ordered
982   /// with respect to other call instructions, but loads and stores may float
983   /// past it.
984   ANNOTATION_LABEL,
985 
986   /// CATCHRET - Represents a return from a catch block funclet. Used for
987   /// MSVC compatible exception handling. Takes a chain operand and a
988   /// destination basic block operand.
989   CATCHRET,
990 
991   /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
992   /// MSVC compatible exception handling. Takes only a chain operand.
993   CLEANUPRET,
994 
995   /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
996   /// value, the same type as the pointer type for the system, and an output
997   /// chain.
998   STACKSAVE,
999 
  /// STACKRESTORE has two operands, an input chain and a pointer to restore
  /// to.  It returns an output chain.
1002   STACKRESTORE,
1003 
1004   /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
1005   /// of a call sequence, and carry arbitrary information that target might
1006   /// want to know.  The first operand is a chain, the rest are specified by
1007   /// the target and not touched by the DAG optimizers.
1008   /// Targets that may use stack to pass call arguments define additional
1009   /// operands:
1010   /// - size of the call frame part that must be set up within the
1011   ///   CALLSEQ_START..CALLSEQ_END pair,
1012   /// - part of the call frame prepared prior to CALLSEQ_START.
1013   /// Both these parameters must be constants, their sum is the total call
1014   /// frame size.
1015   /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
1016   CALLSEQ_START, // Beginning of a call sequence
1017   CALLSEQ_END,   // End of a call sequence
1018 
1019   /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
1020   /// and the alignment. It returns a pair of values: the vaarg value and a
1021   /// new chain.
1022   VAARG,
1023 
1024   /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
1025   /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
1026   /// source.
1027   VACOPY,
1028 
1029   /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
1030   /// pointer, and a SRCVALUE.
1031   VAEND,
1032   VASTART,
1033 
1034   // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
1035   // with the preallocated call Value.
1036   PREALLOCATED_SETUP,
1037   // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
1038   // with the preallocated call Value, and a constant int.
1039   PREALLOCATED_ARG,
1040 
1041   /// SRCVALUE - This is a node type that holds a Value* that is used to
1042   /// make reference to a value in the LLVM IR.
1043   SRCVALUE,
1044 
  /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
1046   /// reference metadata in the IR.
1047   MDNODE_SDNODE,
1048 
1049   /// PCMARKER - This corresponds to the pcmarker intrinsic.
1050   PCMARKER,
1051 
1052   /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
1053   /// It produces a chain and one i64 value. The only operand is a chain.
1054   /// If i64 is not legal, the result will be expanded into smaller values.
1055   /// Still, it returns an i64, so targets should set legality for i64.
1056   /// The result is the content of the architecture-specific cycle
1057   /// counter-like register (or other high accuracy low latency clock source).
1058   READCYCLECOUNTER,
1059 
1060   /// HANDLENODE node - Used as a handle for various purposes.
1061   HANDLENODE,
1062 
1063   /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
1064   /// takes as input a token chain, the pointer to the trampoline, the pointer
1065   /// to the nested function, the pointer to pass for the 'nest' parameter, a
1066   /// SRCVALUE for the trampoline and another for the nested function
1067   /// (allowing targets to access the original Function*).
1068   /// It produces a token chain as output.
1069   INIT_TRAMPOLINE,
1070 
1071   /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
1072   /// It takes a pointer to the trampoline and produces a (possibly) new
1073   /// pointer to the same trampoline with platform-specific adjustments
1074   /// applied.  The pointer it returns points to an executable block of code.
1075   ADJUST_TRAMPOLINE,
1076 
1077   /// TRAP - Trapping instruction
1078   TRAP,
1079 
1080   /// DEBUGTRAP - Trap intended to get the attention of a debugger.
1081   DEBUGTRAP,
1082 
1083   /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
1084   /// failure.
1085   UBSANTRAP,
1086 
1087   /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
1088   /// is the chain.  The other operands are the address to prefetch,
1089   /// read / write specifier, locality specifier and instruction / data cache
1090   /// specifier.
1091   PREFETCH,
1092 
1093   /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
1094   /// This corresponds to the fence instruction. It takes an input chain, and
1095   /// two integer constants: an AtomicOrdering and a SynchronizationScope.
1096   ATOMIC_FENCE,
1097 
1098   /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
1099   /// This corresponds to "load atomic" instruction.
1100   ATOMIC_LOAD,
1101 
1102   /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
1103   /// This corresponds to "store atomic" instruction.
1104   ATOMIC_STORE,
1105 
1106   /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
1107   /// For double-word atomic operations:
1108   /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
1109   ///                                          swapLo, swapHi)
1110   /// This corresponds to the cmpxchg instruction.
1111   ATOMIC_CMP_SWAP,
1112 
1113   /// Val, Success, OUTCHAIN
1114   ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
1115   /// N.b. this is still a strong cmpxchg operation, so
1116   /// Success == "Val == cmp".
1117   ATOMIC_CMP_SWAP_WITH_SUCCESS,
1118 
1119   /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
1120   /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
1121   /// For double-word atomic operations:
1122   /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
1123   /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
1124   /// These correspond to the atomicrmw instruction.
1125   ATOMIC_SWAP,
1126   ATOMIC_LOAD_ADD,
1127   ATOMIC_LOAD_SUB,
1128   ATOMIC_LOAD_AND,
1129   ATOMIC_LOAD_CLR,
1130   ATOMIC_LOAD_OR,
1131   ATOMIC_LOAD_XOR,
1132   ATOMIC_LOAD_NAND,
1133   ATOMIC_LOAD_MIN,
1134   ATOMIC_LOAD_MAX,
1135   ATOMIC_LOAD_UMIN,
1136   ATOMIC_LOAD_UMAX,
1137   ATOMIC_LOAD_FADD,
1138   ATOMIC_LOAD_FSUB,
1139 
1140   // Masked load and store - consecutive vector load and store operations
1141   // with additional mask operand that prevents memory accesses to the
1142   // masked-off lanes.
1143   //
1144   // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
1145   // OutChain = MSTORE(Value, BasePtr, Mask)
1146   MLOAD,
1147   MSTORE,
1148 
1149   // Masked gather and scatter - load and store operations for a vector of
1150   // random addresses with additional mask operand that prevents memory
1151   // accesses to the masked-off lanes.
1152   //
1153   // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
1154   // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
1155   //
1156   // The Index operand can have more vector elements than the other operands
1157   // due to type legalization. The extra elements are ignored.
1158   MGATHER,
1159   MSCATTER,
1160 
1161   /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
1162   /// is the chain and the second operand is the alloca pointer.
1163   LIFETIME_START,
1164   LIFETIME_END,
1165 
1166   /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
  /// beginning and end of a GC transition sequence, and carry arbitrary
  /// information that the target might need for lowering.  The first operand is
1169   /// a chain, the rest are specified by the target and not touched by the DAG
1170   /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
1171   /// nested.
1172   GC_TRANSITION_START,
1173   GC_TRANSITION_END,
1174 
1175   /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
1176   /// the most recent dynamic alloca. For most targets that would be 0, but
1177   /// for some others (e.g. PowerPC, PowerPC64) that would be compile-time
1178   /// known nonzero constant. The only operand here is the chain.
1179   GET_DYNAMIC_AREA_OFFSET,
1180 
1181   /// Pseudo probe for AutoFDO, as a place holder in a basic block to improve
1182   /// the sample counts quality.
1183   PSEUDO_PROBE,
1184 
1185   /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
1186   /// number of elements within a scalable vector. IMM is a constant integer
1187   /// multiplier that is applied to the runtime value.
1188   VSCALE,
1189 
1190   /// Generic reduction nodes. These nodes represent horizontal vector
1191   /// reduction operations, producing a scalar result.
1192   /// The SEQ variants perform reductions in sequential order. The first
1193   /// operand is an initial scalar accumulator value, and the second operand
1194   /// is the vector to reduce.
1195   /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
1196   ///  ... is equivalent to
1197   /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
1198   VECREDUCE_SEQ_FADD,
1199   VECREDUCE_SEQ_FMUL,
1200 
1201   /// These reductions have relaxed evaluation order semantics, and have a
1202   /// single vector operand. The order of evaluation is unspecified. For
1203   /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
1204   /// reduction, i.e.:
1205   /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
1206   ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
1207   ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
1208   ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
1209   /// For non-pow-2 vectors, this can be computed by extracting each element
1210   /// and performing the operation as if it were scalarized.
1211   VECREDUCE_FADD,
1212   VECREDUCE_FMUL,
1213   /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
1214   VECREDUCE_FMAX,
1215   VECREDUCE_FMIN,
1216   /// Integer reductions may have a result type larger than the vector element
1217   /// type. However, the reduction is performed using the vector element type
1218   /// and the value in the top bits is unspecified.
1219   VECREDUCE_ADD,
1220   VECREDUCE_MUL,
1221   VECREDUCE_AND,
1222   VECREDUCE_OR,
1223   VECREDUCE_XOR,
1224   VECREDUCE_SMAX,
1225   VECREDUCE_SMIN,
1226   VECREDUCE_UMAX,
1227   VECREDUCE_UMIN,
1228 
1229 // Vector Predication
1230 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
1231 #include "llvm/IR/VPIntrinsics.def"
1232 
  /// BUILTIN_OP_END - This must be the last enum value in this list.
  /// The target-specific pre-isel opcode values start here.
  BUILTIN_OP_END
};

/// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
/// which cannot raise FP exceptions should be less than this value.
/// Those that do must not be less than this value.
static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;

/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
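
// For illustration, a target's pre-isel opcode enum is expected to be laid
// out against these boundaries roughly as follows (the MYTARGETISD names and
// opcodes below are hypothetical, not part of LLVM):
//
//   namespace MYTARGETISD {
//   enum NodeType : unsigned {
//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
//     SOME_NODE,          // neither strict-FP nor memory-referencing
//     STRICT_SOME_FP_NODE = ISD::FIRST_TARGET_STRICTFP_OPCODE,
//                         // may raise FP exceptions
//     SOME_MEMORY_NODE = ISD::FIRST_TARGET_MEMORY_OPCODE,
//                         // usable with SelectionDAG::getMemIntrinsicNode
//   };
//   } // namespace MYTARGETISD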

/// Get the underlying scalar opcode for a VECREDUCE opcode.
/// For example, ISD::AND for ISD::VECREDUCE_AND.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);

/// Whether this is a vector-predicated opcode.
bool isVPOpcode(unsigned Opcode);

/// The operand position of the vector mask.
Optional<unsigned> getVPMaskIdx(unsigned Opcode);

/// The operand position of the explicit vector length parameter.
Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
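
// A usage sketch (illustrative only; N is assumed to be an SDNode with a
// vector-predicated opcode):
//
//   if (ISD::isVPOpcode(N->getOpcode())) {
//     if (Optional<unsigned> MaskIdx = ISD::getVPMaskIdx(N->getOpcode()))
//       SDValue Mask = N->getOperand(*MaskIdx);
//     if (Optional<unsigned> EVLIdx =
//             ISD::getVPExplicitVectorLengthIdx(N->getOpcode()))
//       SDValue EVL = N->getOperand(*EVLIdx);
//   }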

//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
///
/// UNINDEXED    "Normal" load / store. The effective address is already
///              computed and is available in the base pointer. The offset
///              operand is always undefined. In addition to producing a
///              chain, an unindexed load produces one value (result of the
///              load); an unindexed store does not produce a value.
///
/// PRE_INC      Similar to the unindexed mode, but the effective address is
/// PRE_DEC      the value of the base pointer plus / minus the offset.
///              The address computation is considered to be folded into the
///              load / store operation (i.e. the load / store does the
///              address computation as well as performing the memory
///              transaction). The base operand is always undefined. In
///              addition to producing a chain, a pre-indexed load produces
///              two values (the result of the load and the result of the
///              address computation); a pre-indexed store produces one value
///              (the result of the address computation).
///
/// POST_INC     The effective address is the value of the base pointer. The
/// POST_DEC     value of the offset operand is then added to / subtracted
///              from the base after the memory transaction. In addition to
///              producing a chain, a post-indexed load produces two values
///              (the result of the load and the result of the base +/- offset
///              computation); a post-indexed store produces one value (the
///              result of the base +/- offset computation).
enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
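
// Illustrative sketch of the values produced (types chosen arbitrarily):
//
//   Pre-indexed load:   {Loaded:i32, NewBase:iPTR, Chain} =
//                           load<pre-inc> Chain, Base, Offset
//                       loads from Base + Offset and also returns
//                       NewBase == Base + Offset.
//   Post-indexed load:  {Loaded:i32, NewBase:iPTR, Chain} =
//                           load<post-inc> Chain, Base, Offset
//                       loads from Base and returns NewBase == Base + Offset.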

static const int LAST_INDEXED_MODE = POST_DEC + 1;

//===--------------------------------------------------------------------===//
/// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
/// index parameter when calculating addresses.
///
/// SIGNED_SCALED     Addr = Base + ((signed)Index * sizeof(element))
/// SIGNED_UNSCALED   Addr = Base + (signed)Index
/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * sizeof(element))
/// UNSIGNED_UNSCALED Addr = Base + (unsigned)Index
enum MemIndexType {
  SIGNED_SCALED = 0,
  SIGNED_UNSCALED,
  UNSIGNED_SCALED,
  UNSIGNED_UNSCALED
};
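
// Worked example (illustrative numbers): gathering i32 elements with
// Base = 0x1000 and Index = <0, 2, -1, 5> gives per-lane addresses of:
//
//   SIGNED_SCALED    -> 0x1000, 0x1008, 0x0FFC, 0x1014
//   SIGNED_UNSCALED  -> 0x1000, 0x1002, 0x0FFF, 0x1005
//   UNSIGNED_*       -> the -1 lane is reinterpreted as a large unsigned
//                       index, so its address lies far above Base.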

static const int LAST_MEM_INDEX_TYPE = UNSIGNED_UNSCALED + 1;

//===--------------------------------------------------------------------===//
/// LoadExtType enum - This enum defines the three variants of LOADEXT
/// (load with extension).
///
/// SEXTLOAD loads the integer operand and sign extends it to a larger
///          integer result type.
/// ZEXTLOAD loads the integer operand and zero extends it to a larger
///          integer result type.
/// EXTLOAD  is used for two things: floating point extending loads and
///          integer extending loads [the top bits are undefined].
enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };

static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;

NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
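
// For example, getExtForLoadExtType(/*IsFP=*/false, SEXTLOAD) is expected to
// return ISD::SIGN_EXTEND, while getExtForLoadExtType(/*IsFP=*/true, EXTLOAD)
// is expected to return ISD::FP_EXTEND.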

//===--------------------------------------------------------------------===//
/// ISD::CondCode enum - These are ordered carefully to make the bitfields
/// below work out, when considering SETFALSE (something that never exists
/// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
/// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
/// to. If the "N" column is 1, the result of the comparison is undefined if
/// the input is a NaN.
///
/// All of these (except for the 'always folded ops') should be handled for
/// floating point. For integer, only the SETEQ, SETNE, SETLT, SETLE, SETGT,
/// SETGE, SETULT, SETULE, SETUGT, and SETUGE opcodes are used.
///
/// Note that these are laid out in a specific order to allow bit-twiddling
/// to transform conditions.
enum CondCode {
  // Opcode       N U L G E       Intuitive operation
  SETFALSE, //      0 0 0 0       Always false (always folded)
  SETOEQ,   //      0 0 0 1       True if ordered and equal
  SETOGT,   //      0 0 1 0       True if ordered and greater than
  SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
  SETOLT,   //      0 1 0 0       True if ordered and less than
  SETOLE,   //      0 1 0 1       True if ordered and less than or equal
  SETONE,   //      0 1 1 0       True if ordered and operands are unequal
  SETO,     //      0 1 1 1       True if ordered (no nans)
  SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
  SETUEQ,   //      1 0 0 1       True if unordered or equal
  SETUGT,   //      1 0 1 0       True if unordered or greater than
  SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
  SETULT,   //      1 1 0 0       True if unordered or less than
  SETULE,   //      1 1 0 1       True if unordered, less than, or equal
  SETUNE,   //      1 1 1 0       True if unordered or not equal
  SETTRUE,  //      1 1 1 1       Always true (always folded)
  // Don't care operations: undefined if the input is a nan.
  SETFALSE2, //   1 X 0 0 0       Always false (always folded)
  SETEQ,     //   1 X 0 0 1       True if equal
  SETGT,     //   1 X 0 1 0       True if greater than
  SETGE,     //   1 X 0 1 1       True if greater than or equal
  SETLT,     //   1 X 1 0 0       True if less than
  SETLE,     //   1 X 1 0 1       True if less than or equal
  SETNE,     //   1 X 1 1 0       True if not equal
  SETTRUE2,  //   1 X 1 1 1       Always true (always folded)

  SETCC_INVALID // Marker value.
};

/// Return true if this is a setcc instruction that performs a signed
/// comparison when used with integer operands.
inline bool isSignedIntSetCC(CondCode Code) {
  return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
}

/// Return true if this is a setcc instruction that performs an unsigned
/// comparison when used with integer operands.
inline bool isUnsignedIntSetCC(CondCode Code) {
  return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}

/// Return true if this is a setcc instruction that performs an equality
/// comparison when used with integer operands.
inline bool isIntEqualitySetCC(CondCode Code) {
  return Code == SETEQ || Code == SETNE;
}

/// Return true if the specified condition returns true if the two operands to
/// the condition are equal. Note that if one of the two operands is a NaN,
/// this value is meaningless.
inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }

/// This function returns 0 if the condition is always false if an operand is
/// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
/// the condition is undefined if the operand is a NaN.
inline unsigned getUnorderedFlavor(CondCode Cond) {
  return ((int)Cond >> 3) & 3;
}
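
// Worked example of the encoding (values follow the table above): SETULT is
// 0b1100, so isTrueWhenEqual(SETULT) is false (E bit clear) and
// getUnorderedFlavor(SETULT) == 1 (always true on a NaN input), while
// getUnorderedFlavor(SETOLT) == 0 (always false on a NaN input) and
// getUnorderedFlavor(SETLT) == 2 (undefined on a NaN input).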

/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation.
CondCode getSetCCInverse(CondCode Operation, EVT Type);

namespace GlobalISel {
/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation. The U bit of the condition code has different meanings
/// between floating point and integer comparisons and LLTs don't provide
/// this distinction. As such we need to be told whether the comparison is
/// floating point or integer-like. Pointers should use integer-like
/// comparisons.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
} // end namespace GlobalISel

/// Return the operation corresponding to (Y op X) when given the operation
/// for (X op Y).
CondCode getSetCCSwappedOperands(CondCode Operation);

/// Return the result of a logical OR between different comparisons of
/// identical values: ((X op1 Y) | (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);

/// Return the result of a logical AND between different comparisons of
/// identical values: ((X op1 Y) & (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);
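
// For example, for an integer comparison type, combining (X < Y) | (X == Y)
// via getSetCCOrOperation(SETLT, SETEQ, Type) is expected to yield SETLE, and
// (X <= Y) & (X >= Y) via getSetCCAndOperation(SETLE, SETGE, Type) is
// expected to yield SETEQ.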

} // namespace ISD

} // namespace llvm

#endif