1 //===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file declares codegen opcodes and related utilities. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #ifndef LLVM_CODEGEN_ISDOPCODES_H 14 #define LLVM_CODEGEN_ISDOPCODES_H 15 16 #include "llvm/CodeGen/ValueTypes.h" 17 18 namespace llvm { 19 20 /// ISD namespace - This namespace contains an enum which represents all of the 21 /// SelectionDAG node types and value types. 22 /// 23 namespace ISD { 24 25 //===--------------------------------------------------------------------===// 26 /// ISD::NodeType enum - This enum defines the target-independent operators 27 /// for a SelectionDAG. 28 /// 29 /// Targets may also define target-dependent operator codes for SDNodes. For 30 /// example, on x86, these are the enum values in the X86ISD namespace. 31 /// Targets should aim to use target-independent operators to model their 32 /// instruction sets as much as possible, and only use target-dependent 33 /// operators when they have special requirements. 34 /// 35 /// Finally, during and after selection proper, SDNodes may use special 36 /// operator codes that correspond directly with MachineInstr opcodes. These 37 /// are used to represent selected instructions. See the isMachineOpcode() 38 /// and getMachineOpcode() member functions of SDNode. 39 /// 40 enum NodeType { 41 42 /// DELETED_NODE - This is an illegal value that is used to catch 43 /// errors. This opcode is not a legal opcode for any node. 44 DELETED_NODE, 45 46 /// EntryToken - This is the marker used to indicate the start of a region. 
47 EntryToken, 48 49 /// TokenFactor - This node takes multiple tokens as input and produces a 50 /// single token result. This is used to represent the fact that the operand 51 /// operators are independent of each other. 52 TokenFactor, 53 54 /// AssertSext, AssertZext - These nodes record if a register contains a 55 /// value that has already been zero or sign extended from a narrower type. 56 /// These nodes take two operands. The first is the node that has already 57 /// been extended, and the second is a value type node indicating the width 58 /// of the extension. 59 /// NOTE: In case of the source value (or any vector element value) is 60 /// poisoned the assertion will not be true for that value. 61 AssertSext, 62 AssertZext, 63 64 /// AssertAlign - These nodes record if a register contains a value that 65 /// has a known alignment and the trailing bits are known to be zero. 66 /// NOTE: In case of the source value (or any vector element value) is 67 /// poisoned the assertion will not be true for that value. 68 AssertAlign, 69 70 /// Various leaf nodes. 71 BasicBlock, 72 VALUETYPE, 73 CONDCODE, 74 Register, 75 RegisterMask, 76 Constant, 77 ConstantFP, 78 GlobalAddress, 79 GlobalTLSAddress, 80 FrameIndex, 81 JumpTable, 82 ConstantPool, 83 ExternalSymbol, 84 BlockAddress, 85 86 /// The address of the GOT 87 GLOBAL_OFFSET_TABLE, 88 89 /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and 90 /// llvm.returnaddress on the DAG. These nodes take one operand, the index 91 /// of the frame or return address to return. An index of zero corresponds 92 /// to the current function's frame or return address, an index of one to 93 /// the parent's frame or return address, and so on. 94 FRAMEADDR, 95 RETURNADDR, 96 97 /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic. 
98 /// This node takes no operand, returns a target-specific pointer to the 99 /// place in the stack frame where the return address of the current 100 /// function is stored. 101 ADDROFRETURNADDR, 102 103 /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument 104 /// and returns the stack pointer value at the entry of the current 105 /// function calling this intrinsic. 106 SPONENTRY, 107 108 /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic. 109 /// Materializes the offset from the local object pointer of another 110 /// function to a particular local object passed to llvm.localescape. The 111 /// operand is the MCSymbol label used to represent this offset, since 112 /// typically the offset is not known until after code generation of the 113 /// parent. 114 LOCAL_RECOVER, 115 116 /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on 117 /// the DAG, which implements the named register global variables extension. 118 READ_REGISTER, 119 WRITE_REGISTER, 120 121 /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to 122 /// first (possible) on-stack argument. This is needed for correct stack 123 /// adjustment during unwind. 124 FRAME_TO_ARGS_OFFSET, 125 126 /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical 127 /// Frame Address (CFA), generally the value of the stack pointer at the 128 /// call site in the previous frame. 129 EH_DWARF_CFA, 130 131 /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 132 /// 'eh_return' gcc dwarf builtin, which is used to return from 133 /// exception. The general meaning is: adjust stack by OFFSET and pass 134 /// execution to HANDLER. Many platform-related details also :) 135 EH_RETURN, 136 137 /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) 138 /// This corresponds to the eh.sjlj.setjmp intrinsic. 139 /// It takes an input chain and a pointer to the jump buffer as inputs 140 /// and returns an outchain. 
141 EH_SJLJ_SETJMP, 142 143 /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) 144 /// This corresponds to the eh.sjlj.longjmp intrinsic. 145 /// It takes an input chain and a pointer to the jump buffer as inputs 146 /// and returns an outchain. 147 EH_SJLJ_LONGJMP, 148 149 /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) 150 /// The target initializes the dispatch table here. 151 EH_SJLJ_SETUP_DISPATCH, 152 153 /// TargetConstant* - Like Constant*, but the DAG does not do any folding, 154 /// simplification, or lowering of the constant. They are used for constants 155 /// which are known to fit in the immediate fields of their users, or for 156 /// carrying magic numbers which are not values which need to be 157 /// materialized in registers. 158 TargetConstant, 159 TargetConstantFP, 160 161 /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or 162 /// anything else with this node, and this is valid in the target-specific 163 /// dag, turning into a GlobalAddress operand. 164 TargetGlobalAddress, 165 TargetGlobalTLSAddress, 166 TargetFrameIndex, 167 TargetJumpTable, 168 TargetConstantPool, 169 TargetExternalSymbol, 170 TargetBlockAddress, 171 172 MCSymbol, 173 174 /// TargetIndex - Like a constant pool entry, but with completely 175 /// target-dependent semantics. Holds target flags, a 32-bit index, and a 176 /// 64-bit index. Targets can use this however they like. 177 TargetIndex, 178 179 /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) 180 /// This node represents a target intrinsic function with no side effects. 181 /// The first operand is the ID number of the intrinsic from the 182 /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The 183 /// node returns the result of the intrinsic. 184 INTRINSIC_WO_CHAIN, 185 186 /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) 187 /// This node represents a target intrinsic function with side effects that 188 /// returns a result. 
The first operand is a chain pointer. The second is 189 /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The 190 /// operands to the intrinsic follow. The node has two results, the result 191 /// of the intrinsic and an output chain. 192 INTRINSIC_W_CHAIN, 193 194 /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) 195 /// This node represents a target intrinsic function with side effects that 196 /// does not return a result. The first operand is a chain pointer. The 197 /// second is the ID number of the intrinsic from the llvm::Intrinsic 198 /// namespace. The operands to the intrinsic follow. 199 INTRINSIC_VOID, 200 201 /// CopyToReg - This node has three operands: a chain, a register number to 202 /// set to this value, and a value. 203 CopyToReg, 204 205 /// CopyFromReg - This node indicates that the input value is a virtual or 206 /// physical register that is defined outside of the scope of this 207 /// SelectionDAG. The register is available from the RegisterSDNode object. 208 CopyFromReg, 209 210 /// UNDEF - An undefined node. 211 UNDEF, 212 213 // FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or 214 // is evaluated to UNDEF), or returns VAL otherwise. Note that each 215 // read of UNDEF can yield different value, but FREEZE(UNDEF) cannot. 216 FREEZE, 217 218 /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by 219 /// a Constant, which is required to be operand #1) half of the integer or 220 /// float value specified as operand #0. This is only for use before 221 /// legalization, for values that will be broken into multiple registers. 222 EXTRACT_ELEMENT, 223 224 /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. 225 /// Given two values of the same integer value type, this produces a value 226 /// twice as big. Like EXTRACT_ELEMENT, this can only be used before 227 /// legalization. 
The lower part of the composite value should be in 228 /// element 0 and the upper part should be in element 1. 229 BUILD_PAIR, 230 231 /// MERGE_VALUES - This node takes multiple discrete operands and returns 232 /// them all as its individual results. This node has exactly the same 233 /// number of inputs and outputs. This node is useful for some pieces of the 234 /// code generator that want to think about a single node with multiple 235 /// results, not multiple nodes. 236 MERGE_VALUES, 237 238 /// Simple integer binary arithmetic operators. 239 ADD, 240 SUB, 241 MUL, 242 SDIV, 243 UDIV, 244 SREM, 245 UREM, 246 247 /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing 248 /// a signed/unsigned value of type i[2*N], and return the full value as 249 /// two results, each of type iN. 250 SMUL_LOHI, 251 UMUL_LOHI, 252 253 /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and 254 /// remainder result. 255 SDIVREM, 256 UDIVREM, 257 258 /// CARRY_FALSE - This node is used when folding other nodes, 259 /// like ADDC/SUBC, which indicate the carry result is always false. 260 CARRY_FALSE, 261 262 /// Carry-setting nodes for multiple precision addition and subtraction. 263 /// These nodes take two operands of the same value type, and produce two 264 /// results. The first result is the normal add or sub result, the second 265 /// result is the carry flag result. 266 /// FIXME: These nodes are deprecated in favor of UADDO_CARRY and USUBO_CARRY. 267 /// They are kept around for now to provide a smooth transition path 268 /// toward the use of UADDO_CARRY/USUBO_CARRY and will eventually be removed. 269 ADDC, 270 SUBC, 271 272 /// Carry-using nodes for multiple precision addition and subtraction. These 273 /// nodes take three operands: The first two are the normal lhs and rhs to 274 /// the add or sub, and the third is the input carry flag. 
These nodes 275 /// produce two results; the normal result of the add or sub, and the output 276 /// carry flag. These nodes both read and write a carry flag to allow them 277 /// to be chained together for add and sub of arbitrarily large 278 /// values. 279 ADDE, 280 SUBE, 281 282 /// Carry-using nodes for multiple precision addition and subtraction. 283 /// These nodes take three operands: The first two are the normal lhs and 284 /// rhs to the add or sub, and the third is a boolean value that is 1 if and 285 /// only if there is an incoming carry/borrow. These nodes produce two 286 /// results: the normal result of the add or sub, and a boolean value that is 287 /// 1 if and only if there is an outgoing carry/borrow. 288 /// 289 /// Care must be taken if these opcodes are lowered to hardware instructions 290 /// that use the inverse logic -- 0 if and only if there is an 291 /// incoming/outgoing carry/borrow. In such cases, you must preserve the 292 /// semantics of these opcodes by inverting the incoming carry/borrow, feeding 293 /// it to the add/sub hardware instruction, and then inverting the outgoing 294 /// carry/borrow. 295 /// 296 /// The use of these opcodes is preferable to adde/sube if the target supports 297 /// it, as the carry is a regular value rather than a glue, which allows 298 /// further optimisation. 299 /// 300 /// These opcodes are different from [US]{ADD,SUB}O in that 301 /// U{ADD,SUB}O_CARRY consume and produce a carry/borrow, whereas 302 /// [US]{ADD,SUB}O produce an overflow. 303 UADDO_CARRY, 304 USUBO_CARRY, 305 306 /// Carry-using overflow-aware nodes for multiple precision addition and 307 /// subtraction. These nodes take three operands: The first two are normal lhs 308 /// and rhs to the add or sub, and the third is a boolean indicating if there 309 /// is an incoming carry. 
They produce two results: the normal result of the 310 /// add or sub, and a boolean that indicates if an overflow occurred (*not* 311 /// flag, because it may be a store to memory, etc.). If the type of the 312 /// boolean is not i1 then the high bits conform to getBooleanContents. 313 SADDO_CARRY, 314 SSUBO_CARRY, 315 316 /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition. 317 /// These nodes take two operands: the normal LHS and RHS to the add. They 318 /// produce two results: the normal result of the add, and a boolean that 319 /// indicates if an overflow occurred (*not* a flag, because it may be store 320 /// to memory, etc.). If the type of the boolean is not i1 then the high 321 /// bits conform to getBooleanContents. 322 /// These nodes are generated from llvm.[su]add.with.overflow intrinsics. 323 SADDO, 324 UADDO, 325 326 /// Same for subtraction. 327 SSUBO, 328 USUBO, 329 330 /// Same for multiplication. 331 SMULO, 332 UMULO, 333 334 /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 335 /// integers with the same bit width (W). If the true value of LHS + RHS 336 /// exceeds the largest value that can be represented by W bits, the 337 /// resulting value is this maximum value. Otherwise, if this value is less 338 /// than the smallest value that can be represented by W bits, the 339 /// resulting value is this minimum value. 340 SADDSAT, 341 UADDSAT, 342 343 /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 344 /// integers with the same bit width (W). If the true value of LHS - RHS 345 /// exceeds the largest value that can be represented by W bits, the 346 /// resulting value is this maximum value. Otherwise, if this value is less 347 /// than the smallest value that can be represented by W bits, the 348 /// resulting value is this minimum value. 349 SSUBSAT, 350 USUBSAT, 351 352 /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. 
The first 353 /// operand is the value to be shifted, and the second argument is the amount 354 /// to shift by. Both must be integers of the same bit width (W). If the true 355 /// value of LHS << RHS exceeds the largest value that can be represented by 356 /// W bits, the resulting value is this maximum value. Otherwise, if this 357 /// value is less than the smallest value that can be represented by W bits, 358 /// the resulting value is this minimum value. 359 SSHLSAT, 360 USHLSAT, 361 362 /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication 363 /// on 2 integers with the same width and scale. SCALE represents the scale 364 /// of both operands as fixed point numbers. This SCALE parameter must be a 365 /// constant integer. A scale of zero is effectively performing 366 /// multiplication on 2 integers. 367 SMULFIX, 368 UMULFIX, 369 370 /// Same as the corresponding unsaturated fixed point instructions, but the 371 /// result is clamped between the min and max values representable by the 372 /// bits of the first 2 operands. 373 SMULFIXSAT, 374 UMULFIXSAT, 375 376 /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 377 /// 2 integers with the same width and scale. SCALE represents the scale 378 /// of both operands as fixed point numbers. This SCALE parameter must be a 379 /// constant integer. 380 SDIVFIX, 381 UDIVFIX, 382 383 /// Same as the corresponding unsaturated fixed point instructions, but the 384 /// result is clamped between the min and max values representable by the 385 /// bits of the first 2 operands. 386 SDIVFIXSAT, 387 UDIVFIXSAT, 388 389 /// Simple binary floating point operators. 390 FADD, 391 FSUB, 392 FMUL, 393 FDIV, 394 FREM, 395 396 /// Constrained versions of the binary floating point operators. 397 /// These will be lowered to the simple operators before final selection. 398 /// They are used to limit optimizations while the DAG is being 399 /// optimized. 
400 STRICT_FADD, 401 STRICT_FSUB, 402 STRICT_FMUL, 403 STRICT_FDIV, 404 STRICT_FREM, 405 STRICT_FMA, 406 407 /// Constrained versions of libm-equivalent floating point intrinsics. 408 /// These will be lowered to the equivalent non-constrained pseudo-op 409 /// (or expanded to the equivalent library call) before final selection. 410 /// They are used to limit optimizations while the DAG is being optimized. 411 STRICT_FSQRT, 412 STRICT_FPOW, 413 STRICT_FPOWI, 414 STRICT_FLDEXP, 415 STRICT_FSIN, 416 STRICT_FCOS, 417 STRICT_FEXP, 418 STRICT_FEXP2, 419 STRICT_FLOG, 420 STRICT_FLOG10, 421 STRICT_FLOG2, 422 STRICT_FRINT, 423 STRICT_FNEARBYINT, 424 STRICT_FMAXNUM, 425 STRICT_FMINNUM, 426 STRICT_FCEIL, 427 STRICT_FFLOOR, 428 STRICT_FROUND, 429 STRICT_FROUNDEVEN, 430 STRICT_FTRUNC, 431 STRICT_LROUND, 432 STRICT_LLROUND, 433 STRICT_LRINT, 434 STRICT_LLRINT, 435 STRICT_FMAXIMUM, 436 STRICT_FMINIMUM, 437 438 /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or 439 /// unsigned integer. These have the same semantics as fptosi and fptoui 440 /// in IR. 441 /// They are used to limit optimizations while the DAG is being optimized. 442 STRICT_FP_TO_SINT, 443 STRICT_FP_TO_UINT, 444 445 /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to 446 /// a floating point value. These have the same semantics as sitofp and 447 /// uitofp in IR. 448 /// They are used to limit optimizations while the DAG is being optimized. 449 STRICT_SINT_TO_FP, 450 STRICT_UINT_TO_FP, 451 452 /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating 453 /// point type down to the precision of the destination VT. TRUNC is a 454 /// flag, which is always an integer that is zero or one. If TRUNC is 0, 455 /// this is a normal rounding, if it is 1, this FP_ROUND is known to not 456 /// change the value of Y. 
457 /// 458 /// The TRUNC = 1 case is used in cases where we know that the value will 459 /// not be modified by the node, because Y is not using any of the extra 460 /// precision of source type. This allows certain transformations like 461 /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for 462 /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't 463 /// removed. 464 /// It is used to limit optimizations while the DAG is being optimized. 465 STRICT_FP_ROUND, 466 467 /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP 468 /// type. 469 /// It is used to limit optimizations while the DAG is being optimized. 470 STRICT_FP_EXTEND, 471 472 /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used 473 /// for floating-point operands only. STRICT_FSETCC performs a quiet 474 /// comparison operation, while STRICT_FSETCCS performs a signaling 475 /// comparison operation. 476 STRICT_FSETCC, 477 STRICT_FSETCCS, 478 479 // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic. 480 FPTRUNC_ROUND, 481 482 /// FMA - Perform a * b + c with no intermediate rounding step. 483 FMA, 484 485 /// FMAD - Perform a * b + c, while getting the same result as the 486 /// separately rounded operations. 487 FMAD, 488 489 /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This 490 /// DAG node does not require that X and Y have the same type, just that 491 /// they are both floating point. X and the result must have the same type. 492 /// FCOPYSIGN(f32, f64) is allowed. 493 FCOPYSIGN, 494 495 /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point 496 /// value as an integer 0/1 value. 497 FGETSIGN, 498 499 /// Returns platform specific canonical encoding of a floating point number. 500 FCANONICALIZE, 501 502 /// Performs a check of floating point class property, defined by IEEE-754. 503 /// The first operand is the floating point value to check. 
The second operand 504 /// specifies the checked property and is a TargetConstant which specifies 505 /// test in the same way as intrinsic 'is_fpclass'. 506 /// Returns boolean value. 507 IS_FPCLASS, 508 509 /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector 510 /// with the specified, possibly variable, elements. The types of the 511 /// operands must match the vector element type, except that integer types 512 /// are allowed to be larger than the element type, in which case the 513 /// operands are implicitly truncated. The types of the operands must all 514 /// be the same. 515 BUILD_VECTOR, 516 517 /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element 518 /// at IDX replaced with VAL. If the type of VAL is larger than the vector 519 /// element type then VAL is truncated before replacement. 520 /// 521 /// If VECTOR is a scalable vector, then IDX may be larger than the minimum 522 /// vector width. IDX is not first scaled by the runtime scaling factor of 523 /// VECTOR. 524 INSERT_VECTOR_ELT, 525 526 /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR 527 /// identified by the (potentially variable) element number IDX. If the return 528 /// type is an integer type larger than the element type of the vector, the 529 /// result is extended to the width of the return type. In that case, the high 530 /// bits are undefined. 531 /// 532 /// If VECTOR is a scalable vector, then IDX may be larger than the minimum 533 /// vector width. IDX is not first scaled by the runtime scaling factor of 534 /// VECTOR. 535 EXTRACT_VECTOR_ELT, 536 537 /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of 538 /// vector type with the same length and element type, this produces a 539 /// concatenated vector result value, with length equal to the sum of the 540 /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then 541 /// VECTOR1..VECTORN must all be fixed-width vectors. 
Similarly, if VECTOR0 542 /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors. 543 CONCAT_VECTORS, 544 545 /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 546 /// inserted into VECTOR1. IDX represents the starting element number at which 547 /// VECTOR2 will be inserted. IDX must be a constant multiple of T's known 548 /// minimum vector length. Let the type of VECTOR2 be T, then if T is a 549 /// scalable vector, IDX is first scaled by the runtime scaling factor of T. 550 /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2. 551 /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1 552 /// indices. If this condition cannot be determined statically but is false at 553 /// runtime, then the result vector is undefined. The IDX parameter must be a 554 /// vector index constant type, which for most targets will be an integer 555 /// pointer type. 556 /// 557 /// This operation supports inserting a fixed-width vector into a scalable 558 /// vector, but not the other way around. 559 INSERT_SUBVECTOR, 560 561 /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR. 562 /// Let the result type be T, then IDX represents the starting element number 563 /// from which a subvector of type T is extracted. IDX must be a constant 564 /// multiple of T's known minimum vector length. If T is a scalable vector, 565 /// IDX is first scaled by the runtime scaling factor of T. Elements IDX 566 /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this 567 /// condition cannot be determined statically but is false at runtime, then 568 /// the result vector is undefined. The IDX parameter must be a vector index 569 /// constant type, which for most targets will be an integer pointer type. 570 /// 571 /// This operation supports extracting a fixed-width vector from a scalable 572 /// vector, but not the other way around. 
573 EXTRACT_SUBVECTOR, 574 575 /// VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and 576 /// output vectors having the same type. The first output contains the even 577 /// indices from CONCAT_VECTORS(VEC1, VEC2), with the second output 578 /// containing the odd indices. The relative order of elements within an 579 /// output match that of the concatenated input. 580 VECTOR_DEINTERLEAVE, 581 582 /// VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and 583 /// output vectors having the same type. The first output contains the 584 /// result of interleaving the low half of CONCAT_VECTORS(VEC1, VEC2), with 585 /// the second output containing the result of interleaving the high half. 586 VECTOR_INTERLEAVE, 587 588 /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, 589 /// whose elements are shuffled using the following algorithm: 590 /// RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i] 591 VECTOR_REVERSE, 592 593 /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as 594 /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int 595 /// values that indicate which value (or undef) each result element will 596 /// get. These constant ints are accessible through the 597 /// ShuffleVectorSDNode class. This is quite similar to the Altivec 598 /// 'vperm' instruction, except that the indices must be constants and are 599 /// in terms of the element size of VEC1/VEC2, not in terms of bytes. 600 VECTOR_SHUFFLE, 601 602 /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as 603 /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways. 604 /// Let the result type be T, if IMM is positive it represents the starting 605 /// element number (an index) from which a subvector of type T is extracted 606 /// from CONCAT_VECTORS(VEC1, VEC2). 
If IMM is negative it represents a count 607 /// specifying the number of trailing elements to extract from VEC1, where the 608 /// elements of T are selected using the following algorithm: 609 /// RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i] 610 /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM 611 /// is a constant integer. 612 VECTOR_SPLICE, 613 614 /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a 615 /// scalar value into element 0 of the resultant vector type. The top 616 /// elements 1 to N-1 of the N-element vector are undefined. The type 617 /// of the operand must match the vector element type, except when they 618 /// are integer types. In this case the operand is allowed to be wider 619 /// than the vector element type, and is implicitly truncated to it. 620 SCALAR_TO_VECTOR, 621 622 /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL 623 /// duplicated in all lanes. The type of the operand must match the vector 624 /// element type, except when they are integer types. In this case the 625 /// operand is allowed to be wider than the vector element type, and is 626 /// implicitly truncated to it. 627 SPLAT_VECTOR, 628 629 /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the 630 /// scalar values joined together and then duplicated in all lanes. This 631 /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This 632 /// allows representing a 64-bit splat on a target with 32-bit integers. The 633 /// total width of the scalars must cover the element width. SCALAR1 contains 634 /// the least significant bits of the value regardless of endianness and all 635 /// scalars should have the same type. 
636 SPLAT_VECTOR_PARTS, 637 638 /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised 639 /// of a linear sequence of unsigned values starting from 0 with a step of 640 /// IMM, where IMM must be a TargetConstant with type equal to the vector 641 /// element type. The arithmetic is performed modulo the bitwidth of the 642 /// element. 643 /// 644 /// The operation does not support returning fixed-width vectors or 645 /// non-constant operands. 646 STEP_VECTOR, 647 648 /// MULHU/MULHS - Multiply high - Multiply two integers of type iN, 649 /// producing an unsigned/signed value of type i[2*N], then return the top 650 /// part. 651 MULHU, 652 MULHS, 653 654 /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of 655 /// type i[N+1], halving the result by shifting it one bit right. 656 /// shr(add(ext(X), ext(Y)), 1) 657 AVGFLOORS, 658 AVGFLOORU, 659 /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an 660 /// integer of type i[N+2], add 1 and halve the result by shifting it one bit 661 /// right. shr(add(ext(X), ext(Y), 1), 1) 662 AVGCEILS, 663 AVGCEILU, 664 665 // ABDS/ABDU - Absolute difference - Return the absolute difference between 666 // two numbers interpreted as signed/unsigned. 667 // i.e trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1) 668 // or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1) 669 ABDS, 670 ABDU, 671 672 /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned 673 /// integers. 674 SMIN, 675 SMAX, 676 UMIN, 677 UMAX, 678 679 /// Bitwise operators - logical and, logical or, logical xor. 680 AND, 681 OR, 682 XOR, 683 684 /// ABS - Determine the unsigned absolute value of a signed integer value of 685 /// the same bitwidth. 686 /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow 687 /// is performed. 688 ABS, 689 690 /// Shift and rotation operations. 
After legalization, the type of the 691 /// shift amount is known to be TLI.getShiftAmountTy(). Before legalization 692 /// the shift amount can be any type, but care must be taken to ensure it is 693 /// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before 694 /// legalization, types like i1024 can occur and i8 doesn't have enough bits 695 /// to represent the shift amount. 696 /// When the 1st operand is a vector, the shift amount must be in the same 697 /// type. (TLI.getShiftAmountTy() will return the same type when the input 698 /// type is a vector.) 699 /// For rotates and funnel shifts, the shift amount is treated as an unsigned 700 /// amount modulo the element size of the first operand. 701 /// 702 /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount. 703 /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 704 /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 705 SHL, 706 SRA, 707 SRL, 708 ROTL, 709 ROTR, 710 FSHL, 711 FSHR, 712 713 /// Byte Swap and Counting operators. 714 BSWAP, 715 CTTZ, 716 CTLZ, 717 CTPOP, 718 BITREVERSE, 719 PARITY, 720 721 /// Bit counting operators with an undefined result for zero inputs. 722 CTTZ_ZERO_UNDEF, 723 CTLZ_ZERO_UNDEF, 724 725 /// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not 726 /// i1 then the high bits must conform to getBooleanContents. 727 SELECT, 728 729 /// Select with a vector condition (op #0) and two vector operands (ops #1 730 /// and #2), returning a vector result. All vectors have the same length. 731 /// Much like the scalar select and setcc, each bit in the condition selects 732 /// whether the corresponding result element is taken from op #1 or op #2. 733 /// At first, the VSELECT condition is of vXi1 type. Later, targets may 734 /// change the condition type in order to match the VSELECT node using a 735 /// pattern. The condition follows the BooleanContent format of the target. 
736 VSELECT, 737 738 /// Select with condition operator - This selects between a true value and 739 /// a false value (ops #2 and #3) based on the boolean result of comparing 740 /// the lhs and rhs (ops #0 and #1) of a conditional expression with the 741 /// condition code in op #4, a CondCodeSDNode. 742 SELECT_CC, 743 744 /// SetCC operator - This evaluates to a true value iff the condition is 745 /// true. If the result value type is not i1 then the high bits conform 746 /// to getBooleanContents. The operands to this are the left and right 747 /// operands to compare (ops #0, and #1) and the condition code to compare 748 /// them with (op #2) as a CondCodeSDNode. If the operands are vector types 749 /// then the result type must also be a vector type. 750 SETCC, 751 752 /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but 753 /// op #2 is a boolean indicating if there is an incoming carry. This 754 /// operator checks the result of "LHS - RHS - Carry", and can be used to 755 /// compare two wide integers: 756 /// (setcccarry lhshi rhshi (usubo_carry lhslo rhslo) cc). 757 /// Only valid for integers. 758 SETCCCARRY, 759 760 /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded 761 /// integer shift operations. The operation ordering is: 762 /// [Lo,Hi] = op [LoLHS,HiLHS], Amt 763 SHL_PARTS, 764 SRA_PARTS, 765 SRL_PARTS, 766 767 /// Conversion operators. These are all single input single output 768 /// operations. For all of these, the result type must be strictly 769 /// wider or narrower (depending on the operation) than the source 770 /// type. 771 772 /// SIGN_EXTEND - Used for integer types, replicating the sign bit 773 /// into new bits. 774 SIGN_EXTEND, 775 776 /// ZERO_EXTEND - Used for integer types, zeroing the new bits. 777 ZERO_EXTEND, 778 779 /// ANY_EXTEND - Used for integer types. The high bits are undefined. 780 ANY_EXTEND, 781 782 /// TRUNCATE - Completely drop the high bits. 
783 TRUNCATE, 784 785 /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign 786 /// depends on the first letter) to floating point. 787 SINT_TO_FP, 788 UINT_TO_FP, 789 790 /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to 791 /// sign extend a small value in a large integer register (e.g. sign 792 /// extending the low 8 bits of a 32-bit register to fill the top 24 bits 793 /// with the 7th bit). The size of the smaller type is indicated by the 1th 794 /// operand, a ValueType node. 795 SIGN_EXTEND_INREG, 796 797 /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an 798 /// in-register any-extension of the low lanes of an integer vector. The 799 /// result type must have fewer elements than the operand type, and those 800 /// elements must be larger integer types such that the total size of the 801 /// operand type is less than or equal to the size of the result type. Each 802 /// of the low operand elements is any-extended into the corresponding, 803 /// wider result elements with the high bits becoming undef. 804 /// NOTE: The type legalizer prefers to make the operand and result size 805 /// the same to allow expansion to shuffle vector during op legalization. 806 ANY_EXTEND_VECTOR_INREG, 807 808 /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an 809 /// in-register sign-extension of the low lanes of an integer vector. The 810 /// result type must have fewer elements than the operand type, and those 811 /// elements must be larger integer types such that the total size of the 812 /// operand type is less than or equal to the size of the result type. Each 813 /// of the low operand elements is sign-extended into the corresponding, 814 /// wider result elements. 815 /// NOTE: The type legalizer prefers to make the operand and result size 816 /// the same to allow expansion to shuffle vector during op legalization. 
817 SIGN_EXTEND_VECTOR_INREG, 818 819 /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an 820 /// in-register zero-extension of the low lanes of an integer vector. The 821 /// result type must have fewer elements than the operand type, and those 822 /// elements must be larger integer types such that the total size of the 823 /// operand type is less than or equal to the size of the result type. Each 824 /// of the low operand elements is zero-extended into the corresponding, 825 /// wider result elements. 826 /// NOTE: The type legalizer prefers to make the operand and result size 827 /// the same to allow expansion to shuffle vector during op legalization. 828 ZERO_EXTEND_VECTOR_INREG, 829 830 /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned 831 /// integer. These have the same semantics as fptosi and fptoui in IR. If 832 /// the FP value cannot fit in the integer type, the results are undefined. 833 FP_TO_SINT, 834 FP_TO_UINT, 835 836 /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a 837 /// signed or unsigned scalar integer type given in operand 1 with the 838 /// following semantics: 839 /// 840 /// * If the value is NaN, zero is returned. 841 /// * If the value is larger/smaller than the largest/smallest integer, 842 /// the largest/smallest integer is returned (saturation). 843 /// * Otherwise the result of rounding the value towards zero is returned. 844 /// 845 /// The scalar width of the type given in operand 1 must be equal to, or 846 /// smaller than, the scalar result type width. It may end up being smaller 847 /// than the result width as a result of integer type legalization. 848 /// 849 /// After converting to the scalar integer type in operand 1, the value is 850 /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT 851 /// zero extends. 
852 FP_TO_SINT_SAT, 853 FP_TO_UINT_SAT, 854 855 /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type 856 /// down to the precision of the destination VT. TRUNC is a flag, which is 857 /// always an integer that is zero or one. If TRUNC is 0, this is a 858 /// normal rounding, if it is 1, this FP_ROUND is known to not change the 859 /// value of Y. 860 /// 861 /// The TRUNC = 1 case is used in cases where we know that the value will 862 /// not be modified by the node, because Y is not using any of the extra 863 /// precision of source type. This allows certain transformations like 864 /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for 865 /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed. 866 FP_ROUND, 867 868 /// Returns current rounding mode: 869 /// -1 Undefined 870 /// 0 Round to 0 871 /// 1 Round to nearest, ties to even 872 /// 2 Round to +inf 873 /// 3 Round to -inf 874 /// 4 Round to nearest, ties to zero 875 /// Result is rounding mode and chain. Input is a chain. 876 GET_ROUNDING, 877 878 /// Set rounding mode. 879 /// The first operand is a chain pointer. The second specifies the required 880 /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'. 881 SET_ROUNDING, 882 883 /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type. 884 FP_EXTEND, 885 886 /// BITCAST - This operator converts between integer, vector and FP 887 /// values, as if the value was stored to memory with one type and loaded 888 /// from the same address with the other type (or equivalently for vector 889 /// format conversions, etc). The source and result are required to have 890 /// the same bit size (e.g. f32 <-> i32). This can also be used for 891 /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by 892 /// getNode(). 893 /// 894 /// This operator is subtly different from the bitcast instruction from 895 /// LLVM-IR since this node may change the bits in the register. 
For 896 /// example, this occurs on big-endian NEON and big-endian MSA where the 897 /// layout of the bits in the register depends on the vector type and this 898 /// operator acts as a shuffle operation for some vector type combinations. 899 BITCAST, 900 901 /// ADDRSPACECAST - This operator converts between pointers of different 902 /// address spaces. 903 ADDRSPACECAST, 904 905 /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions 906 /// and truncation for half-precision (16 bit) floating numbers. These nodes 907 /// form a semi-softened interface for dealing with f16 (as an i16), which 908 /// is often a storage-only type but has native conversions. 909 FP16_TO_FP, 910 FP_TO_FP16, 911 STRICT_FP16_TO_FP, 912 STRICT_FP_TO_FP16, 913 914 /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions 915 /// and truncation for bfloat16. These nodes form a semi-softened interface 916 /// for dealing with bf16 (as an i16), which is often a storage-only type but 917 /// has native conversions. 918 BF16_TO_FP, 919 FP_TO_BF16, 920 921 /// Perform various unary floating-point operations inspired by libm. For 922 /// FPOWI, the result is undefined if if the integer operand doesn't fit into 923 /// sizeof(int). 924 FNEG, 925 FABS, 926 FSQRT, 927 FCBRT, 928 FSIN, 929 FCOS, 930 FPOW, 931 FPOWI, 932 /// FLDEXP - ldexp, inspired by libm (op0 * 2**op1). 933 FLDEXP, 934 935 /// FFREXP - frexp, extract fractional and exponent component of a 936 /// floating-point value. Returns the two components as separate return 937 /// values. 938 FFREXP, 939 940 FLOG, 941 FLOG2, 942 FLOG10, 943 FEXP, 944 FEXP2, 945 FCEIL, 946 FTRUNC, 947 FRINT, 948 FNEARBYINT, 949 FROUND, 950 FROUNDEVEN, 951 FFLOOR, 952 LROUND, 953 LLROUND, 954 LRINT, 955 LLRINT, 956 957 /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two 958 /// values. 
959 // 960 /// In the case where a single input is a NaN (either signaling or quiet), 961 /// the non-NaN input is returned. 962 /// 963 /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0. 964 FMINNUM, 965 FMAXNUM, 966 967 /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on 968 /// two values, following the IEEE-754 2008 definition. This differs from 969 /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a 970 /// signaling NaN, returns a quiet NaN. 971 FMINNUM_IEEE, 972 FMAXNUM_IEEE, 973 974 /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 975 /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008 976 /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics. 977 FMINIMUM, 978 FMAXIMUM, 979 980 /// FSINCOS - Compute both fsin and fcos as a single operation. 981 FSINCOS, 982 983 /// Gets the current floating-point environment. The first operand is a token 984 /// chain. The results are FP environment, represented by an integer value, 985 /// and a token chain. 986 GET_FPENV, 987 988 /// Sets the current floating-point environment. The first operand is a token 989 /// chain, the second is FP environment, represented by an integer value. The 990 /// result is a token chain. 991 SET_FPENV, 992 993 /// Set floating-point environment to default state. The first operand and the 994 /// result are token chains. 995 RESET_FPENV, 996 997 /// Gets the current floating-point environment. The first operand is a token 998 /// chain, the second is a pointer to memory, where FP environment is stored 999 /// to. The result is a token chain. 1000 GET_FPENV_MEM, 1001 1002 /// Sets the current floating point environment. The first operand is a token 1003 /// chain, the second is a pointer to memory, where FP environment is loaded 1004 /// from. The result is a token chain. 
1005 SET_FPENV_MEM, 1006 1007 /// LOAD and STORE have token chains as their first operand, then the same 1008 /// operands as an LLVM load/store instruction, then an offset node that 1009 /// is added / subtracted from the base pointer to form the address (for 1010 /// indexed memory ops). 1011 LOAD, 1012 STORE, 1013 1014 /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned 1015 /// to a specified boundary. This node always has two return values: a new 1016 /// stack pointer value and a chain. The first operand is the token chain, 1017 /// the second is the number of bytes to allocate, and the third is the 1018 /// alignment boundary. The size is guaranteed to be a multiple of the 1019 /// stack alignment, and the alignment is guaranteed to be bigger than the 1020 /// stack alignment (if required) or 0 to get standard stack alignment. 1021 DYNAMIC_STACKALLOC, 1022 1023 /// Control flow instructions. These all have token chains. 1024 1025 /// BR - Unconditional branch. The first operand is the chain 1026 /// operand, the second is the MBB to branch to. 1027 BR, 1028 1029 /// BRIND - Indirect branch. The first operand is the chain, the second 1030 /// is the value to branch to, which must be of the same type as the 1031 /// target's pointer type. 1032 BRIND, 1033 1034 /// BR_JT - Jumptable branch. The first operand is the chain, the second 1035 /// is the jumptable index, the last one is the jumptable entry index. 1036 BR_JT, 1037 1038 /// BRCOND - Conditional branch. The first operand is the chain, the 1039 /// second is the condition, the third is the block to branch to if the 1040 /// condition is true. If the type of the condition is not i1, then the 1041 /// high bits must conform to getBooleanContents. If the condition is undef, 1042 /// it nondeterministically jumps to the block. 
1043 /// TODO: Its semantics w.r.t undef requires further discussion; we need to 1044 /// make it sure that it is consistent with optimizations in MIR & the 1045 /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015 1046 BRCOND, 1047 1048 /// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in 1049 /// that the condition is represented as condition code, and two nodes to 1050 /// compare, rather than as a combined SetCC node. The operands in order 1051 /// are chain, cc, lhs, rhs, block to branch to if condition is true. If 1052 /// condition is undef, it nondeterministically jumps to the block. 1053 BR_CC, 1054 1055 /// INLINEASM - Represents an inline asm block. This node always has two 1056 /// return values: a chain and a flag result. The inputs are as follows: 1057 /// Operand #0 : Input chain. 1058 /// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string. 1059 /// Operand #2 : a MDNodeSDNode with the !srcloc metadata. 1060 /// Operand #3 : HasSideEffect, IsAlignStack bits. 1061 /// After this, it is followed by a list of operands with this format: 1062 /// ConstantSDNode: Flags that encode whether it is a mem or not, the 1063 /// of operands that follow, etc. See InlineAsm.h. 1064 /// ... however many operands ... 1065 /// Operand #last: Optional, an incoming flag. 1066 /// 1067 /// The variable width operands are required to represent target addressing 1068 /// modes as a single "operand", even though they may have multiple 1069 /// SDOperands. 1070 INLINEASM, 1071 1072 /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto. 1073 INLINEASM_BR, 1074 1075 /// EH_LABEL - Represents a label in mid basic block used to track 1076 /// locations needed for debug and exception handling tables. These nodes 1077 /// take a chain as input and return a chain. 1078 EH_LABEL, 1079 1080 /// ANNOTATION_LABEL - Represents a mid basic block label used by 1081 /// annotations. 
This should remain within the basic block and be ordered 1082 /// with respect to other call instructions, but loads and stores may float 1083 /// past it. 1084 ANNOTATION_LABEL, 1085 1086 /// CATCHRET - Represents a return from a catch block funclet. Used for 1087 /// MSVC compatible exception handling. Takes a chain operand and a 1088 /// destination basic block operand. 1089 CATCHRET, 1090 1091 /// CLEANUPRET - Represents a return from a cleanup block funclet. Used for 1092 /// MSVC compatible exception handling. Takes only a chain operand. 1093 CLEANUPRET, 1094 1095 /// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a 1096 /// value, the same type as the pointer type for the system, and an output 1097 /// chain. 1098 STACKSAVE, 1099 1100 /// STACKRESTORE has two operands, an input chain and a pointer to restore 1101 /// to it returns an output chain. 1102 STACKRESTORE, 1103 1104 /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end 1105 /// of a call sequence, and carry arbitrary information that target might 1106 /// want to know. The first operand is a chain, the rest are specified by 1107 /// the target and not touched by the DAG optimizers. 1108 /// Targets that may use stack to pass call arguments define additional 1109 /// operands: 1110 /// - size of the call frame part that must be set up within the 1111 /// CALLSEQ_START..CALLSEQ_END pair, 1112 /// - part of the call frame prepared prior to CALLSEQ_START. 1113 /// Both these parameters must be constants, their sum is the total call 1114 /// frame size. 1115 /// CALLSEQ_START..CALLSEQ_END pairs may not be nested. 1116 CALLSEQ_START, // Beginning of a call sequence 1117 CALLSEQ_END, // End of a call sequence 1118 1119 /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, 1120 /// and the alignment. It returns a pair of values: the vaarg value and a 1121 /// new chain. 
1122 VAARG, 1123 1124 /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, 1125 /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the 1126 /// source. 1127 VACOPY, 1128 1129 /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain, 1130 /// pointer, and a SRCVALUE. 1131 VAEND, 1132 VASTART, 1133 1134 // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE 1135 // with the preallocated call Value. 1136 PREALLOCATED_SETUP, 1137 // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE 1138 // with the preallocated call Value, and a constant int. 1139 PREALLOCATED_ARG, 1140 1141 /// SRCVALUE - This is a node type that holds a Value* that is used to 1142 /// make reference to a value in the LLVM IR. 1143 SRCVALUE, 1144 1145 /// MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to 1146 /// reference metadata in the IR. 1147 MDNODE_SDNODE, 1148 1149 /// PCMARKER - This corresponds to the pcmarker intrinsic. 1150 PCMARKER, 1151 1152 /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic. 1153 /// It produces a chain and one i64 value. The only operand is a chain. 1154 /// If i64 is not legal, the result will be expanded into smaller values. 1155 /// Still, it returns an i64, so targets should set legality for i64. 1156 /// The result is the content of the architecture-specific cycle 1157 /// counter-like register (or other high accuracy low latency clock source). 1158 READCYCLECOUNTER, 1159 1160 /// HANDLENODE node - Used as a handle for various purposes. 1161 HANDLENODE, 1162 1163 /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. 
It 1164 /// takes as input a token chain, the pointer to the trampoline, the pointer 1165 /// to the nested function, the pointer to pass for the 'nest' parameter, a 1166 /// SRCVALUE for the trampoline and another for the nested function 1167 /// (allowing targets to access the original Function*). 1168 /// It produces a token chain as output. 1169 INIT_TRAMPOLINE, 1170 1171 /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic. 1172 /// It takes a pointer to the trampoline and produces a (possibly) new 1173 /// pointer to the same trampoline with platform-specific adjustments 1174 /// applied. The pointer it returns points to an executable block of code. 1175 ADJUST_TRAMPOLINE, 1176 1177 /// TRAP - Trapping instruction 1178 TRAP, 1179 1180 /// DEBUGTRAP - Trap intended to get the attention of a debugger. 1181 DEBUGTRAP, 1182 1183 /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer 1184 /// failure. 1185 UBSANTRAP, 1186 1187 /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand 1188 /// is the chain. The other operands are the address to prefetch, 1189 /// read / write specifier, locality specifier and instruction / data cache 1190 /// specifier. 1191 PREFETCH, 1192 1193 /// ARITH_FENCE - This corresponds to a arithmetic fence intrinsic. Both its 1194 /// operand and output are the same floating type. 1195 ARITH_FENCE, 1196 1197 /// MEMBARRIER - Compiler barrier only; generate a no-op. 1198 MEMBARRIER, 1199 1200 /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) 1201 /// This corresponds to the fence instruction. It takes an input chain, and 1202 /// two integer constants: an AtomicOrdering and a SynchronizationScope. 1203 ATOMIC_FENCE, 1204 1205 /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) 1206 /// This corresponds to "load atomic" instruction. 1207 ATOMIC_LOAD, 1208 1209 /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) 1210 /// This corresponds to "store atomic" instruction. 
1211 ATOMIC_STORE, 1212 1213 /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) 1214 /// For double-word atomic operations: 1215 /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi, 1216 /// swapLo, swapHi) 1217 /// This corresponds to the cmpxchg instruction. 1218 ATOMIC_CMP_SWAP, 1219 1220 /// Val, Success, OUTCHAIN 1221 /// = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) 1222 /// N.b. this is still a strong cmpxchg operation, so 1223 /// Success == "Val == cmp". 1224 ATOMIC_CMP_SWAP_WITH_SUCCESS, 1225 1226 /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) 1227 /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) 1228 /// For double-word atomic operations: 1229 /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) 1230 /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) 1231 /// These correspond to the atomicrmw instruction. 1232 ATOMIC_SWAP, 1233 ATOMIC_LOAD_ADD, 1234 ATOMIC_LOAD_SUB, 1235 ATOMIC_LOAD_AND, 1236 ATOMIC_LOAD_CLR, 1237 ATOMIC_LOAD_OR, 1238 ATOMIC_LOAD_XOR, 1239 ATOMIC_LOAD_NAND, 1240 ATOMIC_LOAD_MIN, 1241 ATOMIC_LOAD_MAX, 1242 ATOMIC_LOAD_UMIN, 1243 ATOMIC_LOAD_UMAX, 1244 ATOMIC_LOAD_FADD, 1245 ATOMIC_LOAD_FSUB, 1246 ATOMIC_LOAD_FMAX, 1247 ATOMIC_LOAD_FMIN, 1248 ATOMIC_LOAD_UINC_WRAP, 1249 ATOMIC_LOAD_UDEC_WRAP, 1250 1251 // Masked load and store - consecutive vector load and store operations 1252 // with additional mask operand that prevents memory accesses to the 1253 // masked-off lanes. 1254 // 1255 // Val, OutChain = MLOAD(BasePtr, Mask, PassThru) 1256 // OutChain = MSTORE(Value, BasePtr, Mask) 1257 MLOAD, 1258 MSTORE, 1259 1260 // Masked gather and scatter - load and store operations for a vector of 1261 // random addresses with additional mask operand that prevents memory 1262 // accesses to the masked-off lanes. 
1263 // 1264 // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale) 1265 // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale) 1266 // 1267 // The Index operand can have more vector elements than the other operands 1268 // due to type legalization. The extra elements are ignored. 1269 MGATHER, 1270 MSCATTER, 1271 1272 /// This corresponds to the llvm.lifetime.* intrinsics. The first operand 1273 /// is the chain and the second operand is the alloca pointer. 1274 LIFETIME_START, 1275 LIFETIME_END, 1276 1277 /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the 1278 /// beginning and end of GC transition sequence, and carry arbitrary 1279 /// information that target might need for lowering. The first operand is 1280 /// a chain, the rest are specified by the target and not touched by the DAG 1281 /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be 1282 /// nested. 1283 GC_TRANSITION_START, 1284 GC_TRANSITION_END, 1285 1286 /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of 1287 /// the most recent dynamic alloca. For most targets that would be 0, but 1288 /// for some others (e.g. PowerPC, PowerPC64) that would be compile-time 1289 /// known nonzero constant. The only operand here is the chain. 1290 GET_DYNAMIC_AREA_OFFSET, 1291 1292 /// Pseudo probe for AutoFDO, as a place holder in a basic block to improve 1293 /// the sample counts quality. 1294 PSEUDO_PROBE, 1295 1296 /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the 1297 /// number of elements within a scalable vector. IMM is a constant integer 1298 /// multiplier that is applied to the runtime value. 1299 VSCALE, 1300 1301 /// Generic reduction nodes. These nodes represent horizontal vector 1302 /// reduction operations, producing a scalar result. 1303 /// The SEQ variants perform reductions in sequential order. 
The first 1304 /// operand is an initial scalar accumulator value, and the second operand 1305 /// is the vector to reduce. 1306 /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC 1307 /// ... is equivalent to 1308 /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3] 1309 VECREDUCE_SEQ_FADD, 1310 VECREDUCE_SEQ_FMUL, 1311 1312 /// These reductions have relaxed evaluation order semantics, and have a 1313 /// single vector operand. The order of evaluation is unspecified. For 1314 /// pow-of-2 vectors, one valid legalizer expansion is to use a tree 1315 /// reduction, i.e.: 1316 /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC 1317 /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7] 1318 /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3] 1319 /// RES = FADD PART_RDX2[0], PART_RDX2[1] 1320 /// For non-pow-2 vectors, this can be computed by extracting each element 1321 /// and performing the operation as if it were scalarized. 1322 VECREDUCE_FADD, 1323 VECREDUCE_FMUL, 1324 /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants. 1325 VECREDUCE_FMAX, 1326 VECREDUCE_FMIN, 1327 /// FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the 1328 /// llvm.minimum and llvm.maximum semantics. 1329 VECREDUCE_FMAXIMUM, 1330 VECREDUCE_FMINIMUM, 1331 /// Integer reductions may have a result type larger than the vector element 1332 /// type. However, the reduction is performed using the vector element type 1333 /// and the value in the top bits is unspecified. 1334 VECREDUCE_ADD, 1335 VECREDUCE_MUL, 1336 VECREDUCE_AND, 1337 VECREDUCE_OR, 1338 VECREDUCE_XOR, 1339 VECREDUCE_SMAX, 1340 VECREDUCE_SMIN, 1341 VECREDUCE_UMAX, 1342 VECREDUCE_UMIN, 1343 1344 // The `llvm.experimental.stackmap` intrinsic. 1345 // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]] 1346 // Outputs: output chain, glue 1347 STACKMAP, 1348 1349 // The `llvm.experimental.patchpoint.*` intrinsic. 
1350 // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee, 1351 // <numArgs>, cc, ... 1352 // Outputs: [rv], output chain, glue 1353 PATCHPOINT, 1354 1355 // Vector Predication 1356 #define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID, 1357 #include "llvm/IR/VPIntrinsics.def" 1358 1359 /// BUILTIN_OP_END - This must be the last enum value in this list. 1360 /// The target-specific pre-isel opcode values start here. 1361 BUILTIN_OP_END 1362 }; 1363 1364 /// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations 1365 /// which cannot raise FP exceptions should be less than this value. 1366 /// Those that do must not be less than this value. 1367 static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400; 1368 1369 /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations 1370 /// which do not reference a specific memory location should be less than 1371 /// this value. Those that do must not be less than this value, and can 1372 /// be used with SelectionDAG::getMemIntrinsicNode. 1373 static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500; 1374 1375 /// Whether this is bitwise logic opcode. 1376 inline bool isBitwiseLogicOp(unsigned Opcode) { 1377 return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR; 1378 } 1379 1380 /// Get underlying scalar opcode for VECREDUCE opcode. 1381 /// For example ISD::AND for ISD::VECREDUCE_AND. 1382 NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode); 1383 1384 /// Whether this is a vector-predicated Opcode. 1385 bool isVPOpcode(unsigned Opcode); 1386 1387 /// Whether this is a vector-predicated binary operation opcode. 1388 bool isVPBinaryOp(unsigned Opcode); 1389 1390 /// Whether this is a vector-predicated reduction opcode. 1391 bool isVPReduction(unsigned Opcode); 1392 1393 /// The operand position of the vector mask. 
1394 std::optional<unsigned> getVPMaskIdx(unsigned Opcode); 1395 1396 /// The operand position of the explicit vector length parameter. 1397 std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode); 1398 1399 /// Translate this VP Opcode to its corresponding non-VP Opcode. 1400 std::optional<unsigned> getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept); 1401 1402 /// Translate this non-VP Opcode to its corresponding VP Opcode. 1403 unsigned getVPForBaseOpcode(unsigned Opcode); 1404 1405 //===--------------------------------------------------------------------===// 1406 /// MemIndexedMode enum - This enum defines the load / store indexed 1407 /// addressing modes. 1408 /// 1409 /// UNINDEXED "Normal" load / store. The effective address is already 1410 /// computed and is available in the base pointer. The offset 1411 /// operand is always undefined. In addition to producing a 1412 /// chain, an unindexed load produces one value (result of the 1413 /// load); an unindexed store does not produce a value. 1414 /// 1415 /// PRE_INC Similar to the unindexed mode where the effective address is 1416 /// PRE_DEC the value of the base pointer add / subtract the offset. 1417 /// It considers the computation as being folded into the load / 1418 /// store operation (i.e. the load / store does the address 1419 /// computation as well as performing the memory transaction). 1420 /// The base operand is always undefined. In addition to 1421 /// producing a chain, pre-indexed load produces two values 1422 /// (result of the load and the result of the address 1423 /// computation); a pre-indexed store produces one value (result 1424 /// of the address computation). 1425 /// 1426 /// POST_INC The effective address is the value of the base pointer. The 1427 /// POST_DEC value of the offset operand is then added to / subtracted 1428 /// from the base after memory transaction. 
In addition to 1429 /// producing a chain, post-indexed load produces two values 1430 /// (the result of the load and the result of the base +/- offset 1431 /// computation); a post-indexed store produces one value (the 1432 /// the result of the base +/- offset computation). 1433 enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC }; 1434 1435 static const int LAST_INDEXED_MODE = POST_DEC + 1; 1436 1437 //===--------------------------------------------------------------------===// 1438 /// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's 1439 /// index parameter when calculating addresses. 1440 /// 1441 /// SIGNED_SCALED Addr = Base + ((signed)Index * Scale) 1442 /// UNSIGNED_SCALED Addr = Base + ((unsigned)Index * Scale) 1443 /// 1444 /// NOTE: The value of Scale is typically only known to the node owning the 1445 /// IndexType, with a value of 1 the equivalent of being unscaled. 1446 enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED }; 1447 1448 static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1; 1449 1450 inline bool isIndexTypeSigned(MemIndexType IndexType) { 1451 return IndexType == SIGNED_SCALED; 1452 } 1453 1454 //===--------------------------------------------------------------------===// 1455 /// LoadExtType enum - This enum defines the three variants of LOADEXT 1456 /// (load with extension). 1457 /// 1458 /// SEXTLOAD loads the integer operand and sign extends it to a larger 1459 /// integer result type. 1460 /// ZEXTLOAD loads the integer operand and zero extends it to a larger 1461 /// integer result type. 1462 /// EXTLOAD is used for two things: floating point extending loads and 1463 /// integer extending loads [the top bits are undefined]. 
1464 enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD }; 1465 1466 static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1; 1467 1468 NodeType getExtForLoadExtType(bool IsFP, LoadExtType); 1469 1470 //===--------------------------------------------------------------------===// 1471 /// ISD::CondCode enum - These are ordered carefully to make the bitfields 1472 /// below work out, when considering SETFALSE (something that never exists 1473 /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered 1474 /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal 1475 /// to. If the "N" column is 1, the result of the comparison is undefined if 1476 /// the input is a NAN. 1477 /// 1478 /// All of these (except for the 'always folded ops') should be handled for 1479 /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT, 1480 /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used. 1481 /// 1482 /// Note that these are laid out in a specific order to allow bit-twiddling 1483 /// to transform conditions. 
enum CondCode {
  // Opcode      N U L G E    Intuitive operation
  SETFALSE,  //    0 0 0 0    Always false (always folded)
  SETOEQ,    //    0 0 0 1    True if ordered and equal
  SETOGT,    //    0 0 1 0    True if ordered and greater than
  SETOGE,    //    0 0 1 1    True if ordered and greater than or equal
  SETOLT,    //    0 1 0 0    True if ordered and less than
  SETOLE,    //    0 1 0 1    True if ordered and less than or equal
  SETONE,    //    0 1 1 0    True if ordered and operands are unequal
  SETO,      //    0 1 1 1    True if ordered (no nans)
  SETUO,     //    1 0 0 0    True if unordered: isnan(X) | isnan(Y)
  SETUEQ,    //    1 0 0 1    True if unordered or equal
  SETUGT,    //    1 0 1 0    True if unordered or greater than
  SETUGE,    //    1 0 1 1    True if unordered, greater than, or equal
  SETULT,    //    1 1 0 0    True if unordered or less than
  SETULE,    //    1 1 0 1    True if unordered, less than, or equal
  SETUNE,    //    1 1 1 0    True if unordered or not equal
  SETTRUE,   //    1 1 1 1    Always true (always folded)
  // Don't care operations: undefined if the input is a nan.
  SETFALSE2, //  1 X 0 0 0    Always false (always folded)
  SETEQ,     //  1 X 0 0 1    True if equal
  SETGT,     //  1 X 0 1 0    True if greater than
  SETGE,     //  1 X 0 1 1    True if greater than or equal
  SETLT,     //  1 X 1 0 0    True if less than
  SETLE,     //  1 X 1 0 1    True if less than or equal
  SETNE,     //  1 X 1 1 0    True if not equal
  SETTRUE2,  //  1 X 1 1 1    Always true (always folded)

  SETCC_INVALID // Marker value.
};

/// Return true if this is a setcc instruction that performs a signed
/// comparison when used with integer operands.
inline bool isSignedIntSetCC(CondCode Code) {
  switch (Code) {
  case SETGT:
  case SETGE:
  case SETLT:
  case SETLE:
    return true;
  default:
    return false;
  }
}

/// Return true if this is a setcc instruction that performs an unsigned
/// comparison when used with integer operands.
1523 inline bool isUnsignedIntSetCC(CondCode Code) { 1524 return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE; 1525 } 1526 1527 /// Return true if this is a setcc instruction that performs an equality 1528 /// comparison when used with integer operands. 1529 inline bool isIntEqualitySetCC(CondCode Code) { 1530 return Code == SETEQ || Code == SETNE; 1531 } 1532 1533 /// Return true if the specified condition returns true if the two operands to 1534 /// the condition are equal. Note that if one of the two operands is a NaN, 1535 /// this value is meaningless. 1536 inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; } 1537 1538 /// This function returns 0 if the condition is always false if an operand is 1539 /// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if 1540 /// the condition is undefined if the operand is a NaN. 1541 inline unsigned getUnorderedFlavor(CondCode Cond) { 1542 return ((int)Cond >> 3) & 3; 1543 } 1544 1545 /// Return the operation corresponding to !(X op Y), where 'op' is a valid 1546 /// SetCC operation. 1547 CondCode getSetCCInverse(CondCode Operation, EVT Type); 1548 1549 inline bool isExtOpcode(unsigned Opcode) { 1550 return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND || 1551 Opcode == ISD::SIGN_EXTEND; 1552 } 1553 1554 inline bool isExtVecInRegOpcode(unsigned Opcode) { 1555 return Opcode == ISD::ANY_EXTEND_VECTOR_INREG || 1556 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || 1557 Opcode == ISD::SIGN_EXTEND_VECTOR_INREG; 1558 } 1559 1560 namespace GlobalISel { 1561 /// Return the operation corresponding to !(X op Y), where 'op' is a valid 1562 /// SetCC operation. The U bit of the condition code has different meanings 1563 /// between floating point and integer comparisons and LLT's don't provide 1564 /// this distinction. As such we need to be told whether the comparison is 1565 /// floating point or integer-like. 
Pointers should use integer-like 1566 /// comparisons. 1567 CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike); 1568 } // end namespace GlobalISel 1569 1570 /// Return the operation corresponding to (Y op X) when given the operation 1571 /// for (X op Y). 1572 CondCode getSetCCSwappedOperands(CondCode Operation); 1573 1574 /// Return the result of a logical OR between different comparisons of 1575 /// identical values: ((X op1 Y) | (X op2 Y)). This function returns 1576 /// SETCC_INVALID if it is not possible to represent the resultant comparison. 1577 CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type); 1578 1579 /// Return the result of a logical AND between different comparisons of 1580 /// identical values: ((X op1 Y) & (X op2 Y)). This function returns 1581 /// SETCC_INVALID if it is not possible to represent the resultant comparison. 1582 CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type); 1583 1584 } // namespace ISD 1585 1586 } // namespace llvm 1587 1588 #endif 1589