//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the R600-specific intrinsics.
//
//===----------------------------------------------------------------------===//

// i32 read of a preloaded hardware register; no memory effects, safe to
// speculate, always returns.
class AMDGPUReadPreloadRegisterIntrinsic
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// Same as above, but also exposed to Clang under the given builtin name.
class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>,
    ClangBuiltin<name>;

// Used to tag image and resource intrinsics with information used to generate
// mem operands.
class AMDGPURsrcIntrinsic<int rsrcarg, bit isimage = false> {
  int RsrcArg = rsrcarg; // operand index of the resource descriptor
  bit IsImage = isimage; // true for MIMG-style image intrinsics
}

let TargetPrefix = "r600" in {

// One preload-register read per dimension component (x, y, z).
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
  def _x : AMDGPUReadPreloadRegisterIntrinsic;
  def _y : AMDGPUReadPreloadRegisterIntrinsic;
  def _z : AMDGPUReadPreloadRegisterIntrinsic;
}

// As above, with Clang builtins named <prefix>_x / _y / _z.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
}

defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                 <"__builtin_r600_read_global_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                             <"__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                          <"__builtin_r600_read_tgid">;

defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;

def int_r600_group_barrier : ClangBuiltin<"__builtin_r600_group_barrier">,
  Intrinsic<[], [], [IntrConvergent, IntrWillReturn]>;

// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
  ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrWillReturn]>,
  ClangBuiltin<"__builtin_r600_rat_store_typed">;

def int_r600_recipsqrt_ieee :  Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_r600_recipsqrt_clamped : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_r600_cube : Intrinsic<
  [llvm_v4f32_ty], [llvm_v4f32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_r600_store_stream_output : Intrinsic<
  [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrWillReturn]
>;

class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
  llvm_v4f32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem, IntrWillReturn]
>;

class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
  llvm_v4i32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem, IntrWillReturn]
>;

def int_r600_store_swizzle :
  Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn]
>;

def int_r600_tex : TextureIntrinsicFloatInput;
def int_r600_texc : TextureIntrinsicFloatInput;
def int_r600_txl : TextureIntrinsicFloatInput;
def int_r600_txlc : TextureIntrinsicFloatInput;
def int_r600_txb : TextureIntrinsicFloatInput;
def int_r600_txbc : TextureIntrinsicFloatInput;
def int_r600_txf : TextureIntrinsicInt32Input;
def int_r600_txq : TextureIntrinsicInt32Input;
def int_r600_ddx : TextureIntrinsicFloatInput;
def int_r600_ddy : TextureIntrinsicFloatInput;

def int_r600_dot4 : Intrinsic<[llvm_float_ty],
  [llvm_v4f32_ty, llvm_v4f32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_r600_kill : Intrinsic<[], [llvm_float_ty], [IntrWillReturn]>;

} // End TargetPrefix = "r600"

let TargetPrefix = "amdgcn" in {

//===----------------------------------------------------------------------===//
// ABI Special Intrinsics
//===----------------------------------------------------------------------===//

defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                               <"__builtin_amdgcn_workgroup_id">;

def int_amdgcn_dispatch_ptr :
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_queue_ptr :
  ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_kernarg_segment_ptr :
  ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_implicitarg_ptr :
  ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_groupstaticsize :
  ClangBuiltin<"__builtin_amdgcn_groupstaticsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_dispatch_id :
  ClangBuiltin<"__builtin_amdgcn_dispatch_id">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// For internal use. Coordinates LDS lowering between IR transform and backend.
def int_amdgcn_lds_kernel_id :
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_implicit_buffer_ptr :
  ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
// FIXME: Should be mangled for wave size.
def int_amdgcn_init_exec : Intrinsic<[],
  [llvm_i64_ty],      // 64-bit literal constant
  [IntrConvergent, ImmArg<ArgIndex<0>>]>;

// Set EXEC according to a thread count packed in an SGPR input:
//    thread_count = (input >> bitoffset) & 0x7f;
// This is always moved to the beginning of the basic block.
// Note: only inreg arguments to the parent function are valid as
// inputs to this intrinsic, computed values cannot be used.
def int_amdgcn_init_exec_from_input : Intrinsic<[],
  [llvm_i32_ty,       // 32-bit SGPR input
   llvm_i32_ty],      // bit offset of the thread count
  [IntrConvergent, ImmArg<ArgIndex<1>>]>;

def int_amdgcn_wavefrontsize :
  ClangBuiltin<"__builtin_amdgcn_wavefrontsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

//===----------------------------------------------------------------------===//
// Instruction Intrinsics
//===----------------------------------------------------------------------===//

// The first parameter is s_sendmsg immediate (i16),
// the second one is copied to m0
def int_amdgcn_s_sendmsg : ClangBuiltin<"__builtin_amdgcn_s_sendmsg">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_sendmsghalt : ClangBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;

// gfx11 intrinsic
// The first parameter is s_sendmsg immediate (i16). Return type is i32 or i64.
def int_amdgcn_s_sendmsg_rtn : Intrinsic <[llvm_anyint_ty], [llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_barrier : ClangBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent,
                     IntrWillReturn]>;

def int_amdgcn_wave_barrier : ClangBuiltin<"__builtin_amdgcn_wave_barrier">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent,
                     IntrWillReturn]>;

// The 1st parameter is a mask for the types of instructions that may be allowed
// to cross the SCHED_BARRIER during scheduling.
//     MASK = 0x0000 0000: No instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0001: ALL, non-memory, non-side-effect producing instructions may be
//                         scheduled across SCHED_BARRIER, i.e. allow ALU instructions to pass.
//     MASK = 0x0000 0002: VALU instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0004: SALU instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0008: MFMA instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0010: ALL VMEM instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0080: ALL DS instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0100: ALL DS read instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0200: ALL DS write instructions may be scheduled across SCHED_BARRIER.
def int_amdgcn_sched_barrier : ClangBuiltin<"__builtin_amdgcn_sched_barrier">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrConvergent,
                                IntrWillReturn]>;

def int_amdgcn_s_waitcnt : ClangBuiltin<"__builtin_amdgcn_s_waitcnt">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_div_scale : Intrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Select quotient. Must equal Numerator or Denominator.
  //                (0 = Denominator, 1 = Numerator).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>, IntrWillReturn]
>;

def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// Look Up 2.0 / pi src0 with segment select src1[4:0]
def int_amdgcn_trig_preop : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_sin : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cos : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_log_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_fmul_legacy : ClangBuiltin<"__builtin_amdgcn_fmul_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
// Fused single-precision multiply-add with legacy behaviour for the multiply,
// which is that +/- 0.0 * anything (even NaN or infinity) is +0.0. This is
// intended for use on subtargets that have the v_fma_legacy_f32 and/or
// v_fmac_legacy_f32 instructions. (Note that v_fma_legacy_f16 is unrelated and
// has a completely different kind of legacy behaviour.)
def int_amdgcn_fma_legacy :
  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;

def int_amdgcn_rcp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_rcp_legacy : ClangBuiltin<"__builtin_amdgcn_rcp_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_sqrt :  Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_rsq :  Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_rsq_legacy :  ClangBuiltin<"__builtin_amdgcn_rsq_legacy">,
  Intrinsic<
  [llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// out = 1.0 / sqrt(a) result clamped to +/- max_float.
def int_amdgcn_rsq_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_amdgcn_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_frexp_mant : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_frexp_exp : Intrinsic<
  [llvm_anyint_ty], [llvm_anyfloat_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pkrtz : ClangBuiltin<"__builtin_amdgcn_cvt_pkrtz">,
  Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pknorm_i16 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pknorm_u16 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pk_i16 :
    ClangBuiltin<"__builtin_amdgcn_cvt_pk_i16">,
    Intrinsic<
  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pk_u16 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_u16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_class : Intrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_fmed3 : ClangBuiltin<"__builtin_amdgcn_fmed3">,
  Intrinsic<[llvm_anyfloat_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cubeid : ClangBuiltin<"__builtin_amdgcn_cubeid">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cubema : ClangBuiltin<"__builtin_amdgcn_cubema">,
  Intrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cubesc : ClangBuiltin<"__builtin_amdgcn_cubesc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cubetc : ClangBuiltin<"__builtin_amdgcn_cubetc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
  Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
  Intrinsic<[llvm_anyfloat_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// Fields should mirror atomicrmw
class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty,
  LLVMMatchType<0>,
  llvm_i32_ty, // ordering
  llvm_i32_ty, // scope
  llvm_i1_ty], // isVolatile
  [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "",
  [SDNPMemOperand]
>;

def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;

class AMDGPULDSIntrin :
  Intrinsic<[llvm_any_ty],
    [LLVMQualPointerType<LLVMMatchType<0>, 3>,
    LLVMMatchType<0>,
    llvm_i32_ty, // ordering
    llvm_i32_ty, // scope
    llvm_i1_ty], // isVolatile
    [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
     ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]
>;

// FIXME: The m0 argument should be moved after the normal arguments
class AMDGPUDSOrderedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
  // the bit packing can be optimized at the IR level.
  [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
   llvm_i32_ty, // value to add or swap
   llvm_i32_ty, // ordering
   llvm_i32_ty, // scope
   llvm_i1_ty,  // isVolatile
   llvm_i32_ty, // ordered count index (OA index), also added to the address
                // gfx10: bits 24-27 indicate the number of active threads/dwords
   llvm_i1_ty,  // wave release, usually set to 1
   llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
  [IntrWillReturn, NoCapture<ArgIndex<0>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
   ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>]
>;

class AMDGPUDSAppendConsumedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  [llvm_anyptr_ty, // LDS or GDS ptr
   llvm_i1_ty], // isVolatile
   [IntrConvergent, IntrWillReturn, IntrArgMemOnly,
    NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>],
   "",
   [SDNPMemOperand]
>;

def int_amdgcn_ds_ordered_add : AMDGPUDSOrderedIntrinsic;
def int_amdgcn_ds_ordered_swap : AMDGPUDSOrderedIntrinsic;

// The pointer argument is assumed to be dynamically uniform if a VGPR.
def int_amdgcn_ds_append : AMDGPUDSAppendConsumedIntrinsic;
def int_amdgcn_ds_consume : AMDGPUDSAppendConsumedIntrinsic;

def int_amdgcn_ds_fadd : AMDGPULDSIntrin;
def int_amdgcn_ds_fmin : AMDGPULDSIntrin;
def int_amdgcn_ds_fmax : AMDGPULDSIntrin;

} // TargetPrefix = "amdgcn"

// New-style image intrinsics

//////////////////////////////////////////////////////////////////////////
// Dimension-aware image intrinsics framework
//////////////////////////////////////////////////////////////////////////

// Helper class to represent (type, name) combinations of arguments. The
// argument names are explanatory and used as DAG operand names for codegen
// pattern matching.
class AMDGPUArg<LLVMType ty, string name> {
  LLVMType Type = ty;
  string Name = name;
}

// Return [AMDGPUArg<basety, names[0]>, AMDGPUArg<LLVMMatchType<0>, names[1]>, ...]
class makeArgList<list<string> names, LLVMType basety> {
  list<AMDGPUArg> ret =
    !listconcat([AMDGPUArg<basety, names[0]>],
                !foreach(name, !tail(names), AMDGPUArg<LLVMMatchType<0>, name>));
}

// Return arglist, with LLVMMatchType's references shifted by 'shift'.
class arglistmatchshift<list<AMDGPUArg> arglist, int shift> {
  list<AMDGPUArg> ret =
    !foreach(arg, arglist,
             !if(!isa<LLVMMatchType>(arg.Type),
                 AMDGPUArg<LLVMMatchType<!add(!cast<LLVMMatchType>(arg.Type).Number,
                                              shift)>,
                           arg.Name>,
                 arg));
}

// Return the concatenation of the given arglists. LLVMMatchType's are adjusted
// accordingly, and shifted by an additional 'shift'.
class arglistconcat<list<list<AMDGPUArg>> arglists, int shift = 0> {
  list<AMDGPUArg> ret =
    !foldl([]<AMDGPUArg>, arglists, lhs, rhs,
           !listconcat(
             lhs,
             arglistmatchshift<rhs,
               !add(shift, !foldl(0, lhs, a, b,
                                  !add(a, b.Type.isAny)))>.ret));
}

// Represent texture/image types / dimensionality.
class AMDGPUDimProps<bits<3> enc, string name, string asmsuffix,
                     list<string> coord_names, list<string> slice_names,
                     bit msaa = 0> {
  AMDGPUDimProps Dim = !cast<AMDGPUDimProps>(NAME);
  string Name = name; // e.g. "2darraymsaa"
  string AsmSuffix = asmsuffix; // e.g. 2D_MSAA_ARRAY (used in assembly strings)
  bits<3> Encoding = enc;
  bit DA = 0; // DA bit in MIMG encoding
  bit MSAA = msaa;

  list<AMDGPUArg> CoordSliceArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyfloat_ty>.ret;
  list<AMDGPUArg> CoordSliceIntArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyint_ty>.ret;
  list<AMDGPUArg> GradientArgs =
    makeArgList<!listconcat(!foreach(name, coord_names, "d" # name # "dh"),
                            !foreach(name, coord_names, "d" # name # "dv")),
                llvm_anyfloat_ty>.ret;

  bits<8> NumCoords = !size(CoordSliceArgs);
  bits<8> NumGradients = !size(GradientArgs);
}

def AMDGPUDim1D : AMDGPUDimProps<0x0, "1d", "1D", ["s"], []>;
def AMDGPUDim2D : AMDGPUDimProps<0x1, "2d", "2D", ["s", "t"], []>;
def AMDGPUDim3D : AMDGPUDimProps<0x2, "3d", "3D", ["s", "t", "r"], []>;
let DA = 1 in {
  def AMDGPUDimCube : AMDGPUDimProps<0x3, "cube", "CUBE", ["s", "t"], ["face"]>;
  def AMDGPUDim1DArray : AMDGPUDimProps<0x4, "1darray", "1D_ARRAY", ["s"], ["slice"]>;
  def AMDGPUDim2DArray : AMDGPUDimProps<0x5, "2darray", "2D_ARRAY", ["s", "t"], ["slice"]>;
}
def AMDGPUDim2DMsaa : AMDGPUDimProps<0x6, "2dmsaa", "2D_MSAA", ["s", "t"], ["fragid"], 1>;
let DA = 1 in {
  def AMDGPUDim2DArrayMsaa : AMDGPUDimProps<0x7, "2darraymsaa", "2D_MSAA_ARRAY", ["s", "t"], ["slice", "fragid"], 1>;
}

def AMDGPUDims {
  list<AMDGPUDimProps> NoMsaa = [AMDGPUDim1D, AMDGPUDim2D, AMDGPUDim3D,
                                 AMDGPUDimCube, AMDGPUDim1DArray,
                                 AMDGPUDim2DArray];
  list<AMDGPUDimProps> Msaa = [AMDGPUDim2DMsaa, AMDGPUDim2DArrayMsaa];
  list<AMDGPUDimProps> All = !listconcat(NoMsaa, Msaa);
}
// Represent sample variants, i.e. _C, _O, _B, ... and combinations thereof.
class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr> {
  string UpperCaseMod = ucmod;
  string LowerCaseMod = lcmod;

  // {offset} {bias} {z-compare}
  list<AMDGPUArg> ExtraAddrArgs = extra_addr;
  bit Offset = false;
  bit Bias = false;
  bit ZCompare = false;

  bit Gradients = false;

  // Name of the {lod} or {clamp} argument that is appended to the coordinates,
  // if any.
  string LodOrClamp = "";
}

// AMDGPUSampleVariants: all variants supported by IMAGE_SAMPLE
// AMDGPUSampleVariantsNoGradients: variants supported by IMAGE_GATHER4
defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
  multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
                                       list<AMDGPUArg> extra_addr> {
    def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
    let Offset = true in
    def NAME#lcmod#_o : AMDGPUSampleVariant<
        ucmod#"_O", lcmod#"_o",
        !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
  }

  multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
                                        list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
    let ZCompare = true in
    defm NAME : AMDGPUSampleHelper_Offset<
        "_C"#ucmod, "_c"#lcmod,
        !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
  }

  multiclass AMDGPUSampleHelper_Clamp<string ucmod, string lcmod,
                                      list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Compare<ucmod, lcmod, extra_addr>;
    let LodOrClamp = "clamp" in
    defm NAME : AMDGPUSampleHelper_Compare<ucmod#"_CL", lcmod#"_cl", extra_addr>;
  }

  defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
    let Bias = true in
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
        "_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
    let LodOrClamp = "lod" in
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_L", "_l", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_LZ", "_lz", []>;
  }

  let Gradients = true in {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_D", "_d", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_CD", "_cd", []>;
  }
}

// Helper class to capture the profile of a dimension-aware image intrinsic.
// This information is used to generate the intrinsic's type and to inform
// codegen pattern matching.
class AMDGPUDimProfile<string opmod,
                       AMDGPUDimProps dim> {
  AMDGPUDimProps Dim = dim;
  string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod

  // These are intended to be overwritten by subclasses
  bit IsSample = false;
  bit IsAtomic = false;
  list<LLVMType> RetTypes = [];
  list<AMDGPUArg> DataArgs = [];
  list<AMDGPUArg> ExtraAddrArgs = [];
  bit Offset = false;
  bit Bias = false;
  bit ZCompare = false;
  bit Gradients = false;
  string LodClampMip = "";

  int NumRetAndDataAnyTypes =
    !foldl(0, !listconcat(RetTypes, !foreach(arg, DataArgs, arg.Type)), a, b,
           !add(a, b.isAny));

  list<AMDGPUArg> AddrArgs =
    arglistconcat<[ExtraAddrArgs,
                   !if(Gradients, dim.GradientArgs, []),
                   !listconcat(!if(IsSample, dim.CoordSliceArgs,
                                             dim.CoordSliceIntArgs),
                               !if(!empty(LodClampMip),
                                   []<AMDGPUArg>,
                                   [AMDGPUArg<LLVMMatchType<0>, LodClampMip>]))],
                  NumRetAndDataAnyTypes>.ret;
  list<LLVMType> AddrTypes = !foreach(arg, AddrArgs, arg.Type);
  list<AMDGPUArg> AddrDefaultArgs =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_float_ty, llvm_i32_ty), arg.Type),
                       arg.Name>);
  list<AMDGPUArg> AddrA16Args =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_half_ty, llvm_i16_ty), arg.Type),
                       arg.Name>);
}

class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod, base.Dim> {
  let IsSample = base.IsSample;
  let IsAtomic = base.IsAtomic;
  let RetTypes = base.RetTypes;
  let DataArgs = base.DataArgs;
  let ExtraAddrArgs = base.ExtraAddrArgs;
  let Offset = base.Offset;
  let Bias = base.Bias;
  let ZCompare = base.ZCompare;
  let Gradients = base.Gradients;
  let LodClampMip = base.LodClampMip;
}

class AMDGPUDimSampleProfile<string opmod,
                             AMDGPUDimProps dim,
                             AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
  let IsSample = true;
  let RetTypes = [llvm_any_ty];
  let ExtraAddrArgs = sample.ExtraAddrArgs;
  let Offset = sample.Offset;
  let Bias = sample.Bias;
  let ZCompare = sample.ZCompare;
  let Gradients = sample.Gradients;
  let LodClampMip = sample.LodOrClamp;
}

class AMDGPUDimNoSampleProfile<string opmod,
                               AMDGPUDimProps dim,
                               list<LLVMType> retty,
                               list<AMDGPUArg> dataargs,
                               bit Mip = false> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = retty;
  let DataArgs = dataargs;
  let LodClampMip = !if(Mip, "mip", "");
}

class AMDGPUDimAtomicProfile<string opmod,
                             AMDGPUDimProps dim,
                             list<AMDGPUArg> dataargs> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = [llvm_anyint_ty];
  let DataArgs = dataargs;
  let IsAtomic = true;
}

class AMDGPUDimAtomicFloatProfile<string opmod, AMDGPUDimProps dim,
                                  list<AMDGPUArg> dataargs>
    : AMDGPUDimAtomicProfile<opmod, dim, dataargs> {
  let RetTypes = [llvm_anyfloat_ty];
}

class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim>
    : AMDGPUDimProfile<"GET_RESINFO", dim> {
  let RetTypes = [llvm_anyfloat_ty];
  let DataArgs = [];
  let AddrArgs = [AMDGPUArg<llvm_anyint_ty, "mip">];
  let LodClampMip = "mip";
}

// Helper class for figuring out image intrinsic argument indexes.
class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
  int NumDataArgs = !size(P_.DataArgs);
  int NumDmaskArgs = !not(P_.IsAtomic);
  int NumOffsetArgs = !if(P_.Offset, 1, 0);
  int NumBiasArgs = !if(P_.Bias, 1, 0);
  int NumZCompareArgs = !if(P_.ZCompare, 1, 0);
  int NumExtraAddrArgs = !add(NumOffsetArgs, NumBiasArgs, NumZCompareArgs);
  int NumVAddrArgs = !size(P_.AddrArgs);
  int NumGradientArgs = !if(P_.Gradients, !size(P_.Dim.GradientArgs), 0);
  int NumCoordArgs = !if(P_.IsSample, !size(P_.Dim.CoordSliceArgs),
                                      !size(P_.Dim.CoordSliceIntArgs));
  int NumRSrcArgs = 1;
  int NumSampArgs = !if(P_.IsSample, 2, 0);
  int DmaskArgIndex = NumDataArgs;
  int VAddrArgIndex = !add(DmaskArgIndex, NumDmaskArgs);
  int OffsetArgIndex = VAddrArgIndex;
  int BiasArgIndex = !add(VAddrArgIndex, NumOffsetArgs);
  int ZCompareArgIndex = !add(BiasArgIndex, NumBiasArgs);
  int GradientArgIndex = !add(VAddrArgIndex, NumExtraAddrArgs);
  int CoordArgIndex = !add(GradientArgIndex, NumGradientArgs);
  int LodArgIndex = !add(VAddrArgIndex, NumVAddrArgs, -1);
  int MipArgIndex = LodArgIndex;
  int RsrcArgIndex = !add(VAddrArgIndex, NumVAddrArgs);
  int SampArgIndex = !add(RsrcArgIndex, NumRSrcArgs);
  int UnormArgIndex = !add(SampArgIndex, 1);
  int TexFailCtrlArgIndex = !add(SampArgIndex, NumSampArgs);
  int CachePolicyArgIndex = !add(TexFailCtrlArgIndex, 1);
}

// All dimension-aware intrinsics are derived from this class.
// Base class of all dimension-aware image intrinsics. The argument list is
// assembled from the profile P_; ImmArg indices are computed via
// AMDGPUImageDimIntrinsicEval so they track the variable operand layout.
class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
                              list<IntrinsicProperty> props,
                              list<SDNodeProperty> sdnodeprops> : Intrinsic<
    P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
    !listconcat(
      !foreach(arg, P_.DataArgs, arg.Type),      // vdata(VGPR) -- for store/atomic
      !if(P_.IsAtomic, [], [llvm_i32_ty]),       // dmask(imm)
      P_.AddrTypes,                              // vaddr(VGPR)
      [llvm_v8i32_ty],                           // rsrc(SGPR)
      !if(P_.IsSample, [llvm_v4i32_ty,           // samp(SGPR)
                        llvm_i1_ty], []),        // unorm(imm)
      [llvm_i32_ty,                              // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
       llvm_i32_ty]),                            // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)
     !listconcat(props,
          !if(P_.IsAtomic, [], [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>>]),
          !if(P_.IsSample, [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>>], []),
          [IntrWillReturn],
          [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>>,
           ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>]),
      "", sdnodeprops>,
  AMDGPURsrcIntrinsic<AMDGPUImageDimIntrinsicEval<P_>.RsrcArgIndex> {
  AMDGPUDimProfile P = P_;

  AMDGPUImageDimIntrinsic Intr = !cast<AMDGPUImageDimIntrinsic>(NAME);

  let TargetPrefix = "amdgcn";
}

// Marker class for intrinsics with a DMask that determines the returned
// channels.
class AMDGPUImageDMaskIntrinsic;

defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {

  //////////////////////////////////////////////////////////////////////////
  // Load and store intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimIntrinsicsNoMsaa<string opmod,
                                            list<LLVMType> retty,
                                            list<AMDGPUArg> dataargs,
                                            list<IntrinsicProperty> props,
                                            list<SDNodeProperty> sdnodeprops,
                                            bit Mip = false> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  multiclass AMDGPUImageDimIntrinsicsAll<string opmod,
                                         list<LLVMType> retty,
                                         list<AMDGPUArg> dataargs,
                                         list<IntrinsicProperty> props,
                                         list<SDNodeProperty> sdnodeprops,
                                         bit Mip = false> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  defm int_amdgcn_image_load
    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                  [SDNPMemOperand]>,
      AMDGPUImageDMaskIntrinsic;
  defm int_amdgcn_image_load_mip
    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                     [IntrReadMem, IntrWillReturn],
                                     [SDNPMemOperand], 1>,
      AMDGPUImageDMaskIntrinsic;

  defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
              "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>;
  defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
              "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>;

  //////////////////////////////////////////////////////////////////////////
  // MSAA intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach dim = AMDGPUDims.Msaa in {
    def int_amdgcn_image_msaa_load_x # _ # dim.Name:
        AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<"MSAA_LOAD_X", dim, [llvm_any_ty], []>,
            [IntrReadMem], [SDNPMemOperand]>;
  }

  foreach dim = AMDGPUDims.Msaa in {
    def int_amdgcn_image_msaa_load # _ # dim.Name:
        AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<"MSAA_LOAD", dim, [llvm_any_ty], []>,
            [IntrReadMem], [SDNPMemOperand]>;
  }

  //////////////////////////////////////////////////////////////////////////
  // sample and getlod intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimSampleDims<string opmod,
                                      AMDGPUSampleVariant sample,
                                      bit NoMem = false> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
          AMDGPUDimSampleProfile<opmod, dim, sample>,
          !if(NoMem, [IntrNoMem], [IntrReadMem]),
          !if(NoMem, [], [SDNPMemOperand])>;
    }
  }

  foreach sample = AMDGPUSampleVariants in {
    defm int_amdgcn_image_sample # sample.LowerCaseMod
      : AMDGPUImageDimSampleDims<"SAMPLE" # sample.UpperCaseMod, sample>,
        AMDGPUImageDMaskIntrinsic;
  }

  defm int_amdgcn_image_getlod
    : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
      AMDGPUImageDMaskIntrinsic;

  //////////////////////////////////////////////////////////////////////////
  // getresinfo intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach dim = AMDGPUDims.All in {
    def !strconcat("int_amdgcn_image_getresinfo_", dim.Name)
      : AMDGPUImageDimIntrinsic<AMDGPUDimGetResInfoProfile<dim>,
                                [IntrNoMem], []>,
        AMDGPUImageDMaskIntrinsic;
  }
AMDGPUImageDMaskIntrinsic; } ////////////////////////////////////////////////////////////////////////// // gather4 intrinsics ////////////////////////////////////////////////////////////////////////// foreach sample = AMDGPUSampleVariantsNoGradients in { foreach dim = [AMDGPUDim2D, AMDGPUDimCube, AMDGPUDim2DArray] in { def int_amdgcn_image_gather4 # sample.LowerCaseMod # _ # dim.Name: AMDGPUImageDimIntrinsic< AMDGPUDimSampleProfile<"GATHER4" # sample.UpperCaseMod, dim, sample>, [IntrReadMem], [SDNPMemOperand]>; } } } ////////////////////////////////////////////////////////////////////////// // atomic intrinsics ////////////////////////////////////////////////////////////////////////// defset list AMDGPUImageDimAtomicIntrinsics = { multiclass AMDGPUImageDimAtomicX dataargs, int isFloat = 0> { foreach dim = AMDGPUDims.All in { def !strconcat(NAME, "_", dim.Name): AMDGPUImageDimIntrinsic< !if (isFloat, AMDGPUDimAtomicFloatProfile, AMDGPUDimAtomicProfile), [], [SDNPMemOperand]>; } } multiclass AMDGPUImageDimAtomic { defm "" : AMDGPUImageDimAtomicX, "vdata">], isFloat>; } multiclass AMDGPUImageDimFloatAtomic { defm "" : AMDGPUImageDimAtomic; } defm int_amdgcn_image_atomic_swap : AMDGPUImageDimAtomic<"ATOMIC_SWAP">; defm int_amdgcn_image_atomic_add : AMDGPUImageDimAtomic<"ATOMIC_ADD">; defm int_amdgcn_image_atomic_sub : AMDGPUImageDimAtomic<"ATOMIC_SUB">; defm int_amdgcn_image_atomic_smin : AMDGPUImageDimAtomic<"ATOMIC_SMIN">; defm int_amdgcn_image_atomic_umin : AMDGPUImageDimAtomic<"ATOMIC_UMIN">; defm int_amdgcn_image_atomic_fmin : AMDGPUImageDimFloatAtomic<"ATOMIC_FMIN">; defm int_amdgcn_image_atomic_smax : AMDGPUImageDimAtomic<"ATOMIC_SMAX">; defm int_amdgcn_image_atomic_umax : AMDGPUImageDimAtomic<"ATOMIC_UMAX">; defm int_amdgcn_image_atomic_fmax : AMDGPUImageDimFloatAtomic<"ATOMIC_FMAX">; defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">; defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">; defm int_amdgcn_image_atomic_xor : 
AMDGPUImageDimAtomic<"ATOMIC_XOR">; defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">; defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">; defm int_amdgcn_image_atomic_cmpswap : AMDGPUImageDimAtomicX<"ATOMIC_CMPSWAP", [AMDGPUArg, "src">, AMDGPUArg, "cmp">]>; } ////////////////////////////////////////////////////////////////////////// // Buffer intrinsics ////////////////////////////////////////////////////////////////////////// let TargetPrefix = "amdgcn" in { defset list AMDGPUBufferIntrinsics = { class AMDGPUBufferLoad : Intrinsic < [data_ty], [llvm_v4i32_ty, // rsrc(SGPR) llvm_i32_ty, // vindex(VGPR) llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) [IntrReadMem, IntrWillReturn, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_buffer_load_format : AMDGPUBufferLoad; def int_amdgcn_buffer_load : AMDGPUBufferLoad; // Generate a buffer_load instruction that may be optimized to s_buffer_load if // the offset argument is uniform. def int_amdgcn_s_buffer_load : Intrinsic < [llvm_any_ty], [llvm_v4i32_ty, // rsrc(SGPR) llvm_i32_ty, // byte offset llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 2 = dlc) [IntrNoMem, IntrWillReturn, ImmArg>]>, AMDGPURsrcIntrinsic<0>; class AMDGPUBufferStore : Intrinsic < [], [data_ty, // vdata(VGPR) llvm_v4i32_ty, // rsrc(SGPR) llvm_i32_ty, // vindex(VGPR) llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) [IntrWriteMem, IntrWillReturn, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; def int_amdgcn_buffer_store_format : AMDGPUBufferStore; def int_amdgcn_buffer_store : AMDGPUBufferStore; // New buffer intrinsics with separate raw and struct variants. The raw // variant never has an index. The struct variant always has an index, even if // it is const 0. 
// A struct intrinsic with constant 0 index is different to the
// corresponding raw intrinsic on gfx9+ because the behavior of bound checking
// and swizzling changes depending on whether idxen is set in the instruction.
// These new instrinsics also keep the offset and soffset arguments separate as
// they behave differently in bounds checking and swizzling.
class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
  [IntrReadMem, IntrWillReturn, ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;

class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
  [IntrReadMem, IntrWillReturn, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;

class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
  [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;

class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
  [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;

class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty,
                            bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_fmin : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_fmax : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

// gfx908 intrinsic
def int_amdgcn_raw_buffer_atomic_fadd : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;

class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty,
                               bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic<llvm_anyint_ty>;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<6>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;
// gfx908 intrinsic
def int_amdgcn_struct_buffer_atomic_fadd : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;

// gfx90a intrinsics
def int_amdgcn_struct_buffer_atomic_fmin : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_atomic_fmax : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;

// Obsolescent tbuffer intrinsics.
def int_amdgcn_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // voffset(VGPR)
     llvm_i32_ty,     // soffset(SGPR)
     llvm_i32_ty,     // offset(imm)
     llvm_i32_ty,     // dfmt(imm)
     llvm_i32_ty,     // nfmt(imm)
     llvm_i1_ty,      // glc(imm)
     llvm_i1_ty],     // slc(imm)
    [IntrReadMem, IntrWillReturn,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
     ImmArg<ArgIndex<7>>, ImmArg<ArgIndex<8>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // voffset(VGPR)
     llvm_i32_ty,     // soffset(SGPR)
     llvm_i32_ty,     // offset(imm)
     llvm_i32_ty,     // dfmt(imm)
     llvm_i32_ty,     // nfmt(imm)
     llvm_i1_ty,      // glc(imm)
     llvm_i1_ty],     // slc(imm)
    [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<5>>,
     ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>,
     ImmArg<ArgIndex<8>>, ImmArg<ArgIndex<9>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

// New tbuffer intrinsics, with:
// - raw and struct variants
// - joint format field
// - joint cachepolicy field
def int_amdgcn_raw_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
    [IntrReadMem, IntrWillReturn,
     ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_raw_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
    [IntrWriteMem, IntrWillReturn,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

def int_amdgcn_struct_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
    [IntrReadMem, IntrWillReturn,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_struct_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                 swizzled buffer (bit 3 = swz))
    [IntrWriteMem, IntrWillReturn,
     ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

class AMDGPUBufferAtomic : Intrinsic <
  [llvm_anyint_ty],
  [LLVMMatchType<0>,       // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],
  [llvm_i32_ty,       // src(VGPR)
   llvm_i32_ty,       // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  [ImmArg<ArgIndex<5>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

def int_amdgcn_buffer_atomic_csub : AMDGPUBufferAtomic;

class AMDGPUBufferAtomicFP : Intrinsic <
  [llvm_anyfloat_ty],
  [LLVMMatchType<0>,  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  [ImmArg<ArgIndex<4>>, IntrWillReturn], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;

// Legacy form of the intrinsic. raw and struct forms should be preferred.
def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicFP;

class AMDGPURawBufferLoadLDS : Intrinsic <
  [],
  [llvm_v4i32_ty,                      // rsrc(SGPR)
   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                       //                                   bit 1 = slc,
                                       //                                   bit 2 = dlc on gfx10+))
                                       //                 swizzled buffer (bit 3 = swz))
  [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
   ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_lds : AMDGPURawBufferLoadLDS;

class AMDGPUStructBufferLoadLDS : Intrinsic <
  [],
  [llvm_v4i32_ty,                      // rsrc(SGPR)
   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // vindex(VGPR)
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                       //                                   bit 1 = slc,
                                       //                                   bit 2 = dlc on gfx10+))
                                       //                 swizzled buffer (bit 3 = swz))
  [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
   ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_lds : AMDGPUStructBufferLoadLDS;

} // defset AMDGPUBufferIntrinsics

// Uses that do not set the done bit should set IntrWriteMem on the
// call site.
def int_amdgcn_exp : Intrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm (ignored on GFX11+)
  ],
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
   ImmArg<ArgIndex<7>>, IntrWriteMem, IntrInaccessibleMemOnly,
   IntrWillReturn]
>;

// exp with row_en bit set. Only supported on GFX11+.
def int_amdgcn_exp_row : Intrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i32_ty],      // row number
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
   IntrWriteMem, IntrInaccessibleMemOnly, IntrWillReturn]
>;

// exp with compr bit set. Not supported on GFX11+.
def int_amdgcn_exp_compr : Intrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
  LLVMMatchType<0>,  // src1
  llvm_i1_ty,        // done
  llvm_i1_ty],       // vm
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
   ImmArg<ArgIndex<5>>, IntrWriteMem, IntrInaccessibleMemOnly,
   IntrWillReturn]
>;

def int_amdgcn_buffer_wbinvl1_sc :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_buffer_wbinvl1 :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_dcache_inv :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_memtime :
  ClangBuiltin<"__builtin_amdgcn_s_memtime">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_sleep :
  ClangBuiltin<"__builtin_amdgcn_s_sleep">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]> {
}

def int_amdgcn_s_incperflevel :
  ClangBuiltin<"__builtin_amdgcn_s_incperflevel">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]> {
}

def int_amdgcn_s_decperflevel :
  ClangBuiltin<"__builtin_amdgcn_s_decperflevel">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]> {
}

def int_amdgcn_s_sethalt :
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_setprio :
  ClangBuiltin<"__builtin_amdgcn_s_setprio">,
  Intrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects, IntrWillReturn]>;

// This is IntrHasSideEffects so it can be used to read cycle counters.
def int_amdgcn_s_getreg :
  ClangBuiltin<"__builtin_amdgcn_s_getreg">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
  [IntrNoMem, IntrHasSideEffects, IntrWillReturn, ImmArg<ArgIndex<0>>]
>;

// Note this can be used to set FP environment properties that are
// unsafe to change in non-strictfp functions. The register properties
// available (and value required to access them) may differ per
// subtarget. llvm.amdgcn.s.setreg(hwmode, value)
def int_amdgcn_s_setreg :
  ClangBuiltin<"__builtin_amdgcn_s_setreg">,
  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrHasSideEffects, IntrWillReturn, ImmArg<ArgIndex<0>>]
>;

// int_amdgcn_s_getpc is provided to allow a specific style of position
// independent code to determine the high part of its address when it is
// known (through convention) that the code and any data of interest does
// not cross a 4Gb address boundary. Use for any other purpose may not
// produce the desired results as optimizations may cause code movement,
// especially as we explicitly use IntrNoMem to allow optimizations.
def int_amdgcn_s_getpc :
  ClangBuiltin<"__builtin_amdgcn_s_getpc">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
  ClangBuiltin<"__builtin_amdgcn_interp_mov">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
  ClangBuiltin<"__builtin_amdgcn_interp_p1">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
  ClangBuiltin<"__builtin_amdgcn_interp_p2">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
// See int_amdgcn_v_interp_p1 for why this is IntrNoMem.

// __builtin_amdgcn_interp_p1_f16 <i>, <attr_chan>, <attr>, <high>, <m0>
// high selects whether high or low 16-bits are loaded from LDS
def int_amdgcn_interp_p1_f16 :
  ClangBuiltin<"__builtin_amdgcn_interp_p1_f16">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty,
             llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

// __builtin_amdgcn_interp_p2_f16 <p1>, <j>, <attr_chan>, <attr>, <high>, <m0>
// high selects whether high or low 16-bits are loaded from LDS
def int_amdgcn_interp_p2_f16 :
  ClangBuiltin<"__builtin_amdgcn_interp_p2_f16">,
  Intrinsic<[llvm_half_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

// llvm.amdgcn.lds.direct.load <m0>
// The input argument is m0, which contains a packed combination of address
// offset and flags describing the data type.
def int_amdgcn_lds_direct_load :
  Intrinsic<[llvm_any_ty], // overloaded for types u8, u16, i32/f32, i8, i16
            [llvm_i32_ty],
            [IntrReadMem, IntrSpeculatable, IntrWillReturn]>;

// llvm.amdgcn.lds.param.load <attr_chan>, <attr>, <m0>
// Like interp intrinsics, this reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_lds_param_load :
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.interp.inreg.p10 <p>, <i>, <p0>
def int_amdgcn_interp_inreg_p10 :
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// llvm.amdgcn.interp.inreg.p2 <p>, <j>, <tmp>
def int_amdgcn_interp_inreg_p2 :
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

// llvm.amdgcn.interp.inreg.p10.f16 <p>, <i>, <p0>, <high>
// high selects whether high or low 16-bits are used for p and p0 operands
def int_amdgcn_interp_inreg_p10_f16:
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.interp.inreg.p2.f16 <p>, <j>, <tmp>, <high>
// high selects whether high or low 16-bits are used for p operand
def int_amdgcn_interp_inreg_p2_f16 :
  Intrinsic<[llvm_half_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
            [IntrNoMem, IntrSpeculatable, IntrWillReturn,
             ImmArg<ArgIndex<3>>]>;

// Deprecated: use llvm.amdgcn.live.mask instead.
def int_amdgcn_ps_live : Intrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem, IntrWillReturn]>;

// Query currently live lanes.
// Returns true if lane is live (and not a helper lane).
def int_amdgcn_live_mask : Intrinsic <[llvm_i1_ty],
  [], [IntrReadMem, IntrInaccessibleMemOnly, IntrWillReturn]
>;

def int_amdgcn_mbcnt_lo :
  ClangBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
   [IntrNoMem, IntrWillReturn]>;

def int_amdgcn_mbcnt_hi :
  ClangBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrWillReturn]>;

// llvm.amdgcn.ds.swizzle src offset
def int_amdgcn_ds_swizzle :
  ClangBuiltin<"__builtin_amdgcn_ds_swizzle">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<1>>]>;

def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_lerp :
  ClangBuiltin<"__builtin_amdgcn_lerp">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_sad_u8 :
  ClangBuiltin<"__builtin_amdgcn_sad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_msad_u8 :
  ClangBuiltin<"__builtin_amdgcn_msad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_sad_hi_u8 :
  ClangBuiltin<"__builtin_amdgcn_sad_hi_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
def int_amdgcn_sad_u16 :
  ClangBuiltin<"__builtin_amdgcn_sad_u16">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_qsad_pk_u16_u8 :
  ClangBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mqsad_pk_u16_u8 :
  ClangBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mqsad_u32_u8 :
  ClangBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_cvt_pk_u8_f32 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
  Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_icmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<2>>]>;

def int_amdgcn_fcmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<2>>]>;

def int_amdgcn_ballot :
  Intrinsic<[llvm_anyint_ty], [llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn]>;

def int_amdgcn_readfirstlane :
  ClangBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
  ClangBuiltin<"__builtin_amdgcn_readlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn]>;

// The value to write and lane select arguments must be uniform across the
// currently active threads of the current wave. Otherwise, the result is
// undefined.
def int_amdgcn_writelane :
  ClangBuiltin<"__builtin_amdgcn_writelane">,
  Intrinsic<[llvm_i32_ty], [
    llvm_i32_ty,    // uniform value to write: returned by the selected lane
    llvm_i32_ty,    // uniform lane select
    llvm_i32_ty     // returned by all lanes other than the selected one
  ],
  [IntrNoMem, IntrConvergent, IntrWillReturn]
>;

def int_amdgcn_alignbyte : ClangBuiltin<"__builtin_amdgcn_alignbyte">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mul_i24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mul_u24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mulhi_i24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

def int_amdgcn_mulhi_u24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id)
//
// bar_val is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_init :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_init">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrWriteMem, IntrInaccessibleMemOnly, IntrWillReturn],
  "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.barrier(i32 vsrc0, i32 resource_id)
// bar_val is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_barrier :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_barrier">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.v(i32 resource_id)
def int_amdgcn_ds_gws_sema_v :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_v">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.br(i32 vsrc, i32 resource_id)
def int_amdgcn_ds_gws_sema_br :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_br">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.p(i32 resource_id)
def int_amdgcn_ds_gws_sema_p :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_p">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.release.all(i32 resource_id)
def int_amdgcn_ds_gws_sema_release_all :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "",
  [SDNPMemOperand]
>;

// Copies the source value to the destination value, with the guarantee that
// the source value is computed as if the entire program were executed in WQM.
def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// Copies the source value to the destination value, such that the source
// is computed as if the entire program were executed in WQM if any other
// program code executes in WQM.
def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
  [llvm_i1_ty], [IntrNoMem, IntrConvergent, IntrWillReturn]
>;

// If false, set EXEC=0 for the current thread until the end of program.
// FIXME: Should this be IntrNoMem, IntrHasSideEffects, or IntrWillReturn?
def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;

def int_amdgcn_endpgm : ClangBuiltin<"__builtin_amdgcn_endpgm">,
  Intrinsic<[], [], [IntrNoReturn, IntrCold, IntrNoMem, IntrHasSideEffects]
>;

// If false, mark all active lanes as helper lanes until the end of program.
def int_amdgcn_wqm_demote : Intrinsic<[],
  [llvm_i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly]
>;

// Copies the active channels of the source value to the destination value,
// with the guarantee that the source value is computed as if the entire
// program were executed in Whole Wavefront Mode, i.e. with all channels
// enabled, with a few exceptions: - Phi nodes which require WWM return an
// undefined value.
def int_amdgcn_strict_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn]
>;
// Deprecated. Use int_amdgcn_strict_wwm instead.
def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn]
>;
def int_amdgcn_strict_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn]
>;

// Given a value, copies it while setting all the inactive lanes to a given
// value. Note that OpenGL helper lanes are considered active, so if the
// program ever uses WQM, then the instruction and the first source will be
// computed in WQM.
def int_amdgcn_set_inactive :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, // value to be copied
             LLVMMatchType<0>], // value for the inactive lanes to take
            [IntrNoMem, IntrConvergent, IntrWillReturn]>;

// Return if the given flat pointer points to a local memory address.
def int_amdgcn_is_shared :
  ClangBuiltin<"__builtin_amdgcn_is_shared">,
  Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
  [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>, IntrWillReturn]
>;

// Return if the given flat pointer points to a private memory address.
def int_amdgcn_is_private :
  ClangBuiltin<"__builtin_amdgcn_is_private">,
  Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
  [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>, IntrWillReturn]
>;

//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_s_dcache_inv_vol :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_buffer_wbinvl1_vol :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
             ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

// llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
// Should be equivalent to:
// v_mov_b32 <dest> <old>
// v_mov_b32 <dest> <src> <dpp>
def int_amdgcn_update_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
             llvm_i32_ty, llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

def int_amdgcn_s_dcache_wb :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_dcache_wb_vol :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_s_memrealtime :
  ClangBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
  ClangBuiltin<"__builtin_amdgcn_ds_permute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrConvergent, IntrWillReturn]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  ClangBuiltin<"__builtin_amdgcn_ds_bpermute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
     [IntrNoMem, IntrConvergent, IntrWillReturn]>;

// llvm.amdgcn.perm <src0> <src1> <selector>
def int_amdgcn_perm :
  ClangBuiltin<"__builtin_amdgcn_perm">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
     [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

//===----------------------------------------------------------------------===//
// GFX9 Intrinsics
//===----------------------------------------------------------------------===//

class AMDGPUGlobalLoadLDS : Intrinsic <
  [],
  [LLVMQualPointerType<llvm_i8_ty, 1>, // Base global pointer to load from
   LLVMQualPointerType<llvm_i8_ty, 3>, // LDS base pointer to store to
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // imm offset (applied to both global and LDS address)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
                                       //                                   bit 1 = slc/sc1,
                                       //                                   bit 2 = dlc on gfx10+))
                                       //                                   bit 4 = scc/nt on gfx90a+))
  [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>],
  "", [SDNPMemOperand]>;
def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;

//===----------------------------------------------------------------------===//
// GFX10 Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.permlane16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlane16 : ClangBuiltin<"__builtin_amdgcn_permlane16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

// llvm.amdgcn.permlanex16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlanex16 : ClangBuiltin<"__builtin_amdgcn_permlanex16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

// llvm.amdgcn.mov.dpp8.i32 <src> <sel>
// <sel> is a 32-bit constant whose high 8 bits must be zero which selects
// the lanes to read from.
def int_amdgcn_mov_dpp8 :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<1>>]>;

def int_amdgcn_s_get_waveid_in_workgroup :
  ClangBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">,
  Intrinsic<[llvm_i32_ty], [],
    [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

class AMDGPUGlobalAtomicRtn<LLVMType vt> : Intrinsic <
  [vt],
  [llvm_anyptr_ty,    // vaddr
   vt],               // vdata(VGPR)
  [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>], "",
  [SDNPMemOperand]>;

def int_amdgcn_global_atomic_csub : AMDGPUGlobalAtomicRtn<llvm_i32_ty>;

// uint4 llvm.amdgcn.image.bvh.intersect.ray <node_ptr>, <ray_extent>, <ray_origin>,
//                                           <ray_dir>, <ray_inv_dir>, <texture_descr>
// <node_ptr> is i32 or i64.
// <ray_dir> and <ray_inv_dir> are both v3f16 or both v3f32.
def int_amdgcn_image_bvh_intersect_ray :
  Intrinsic<[llvm_v4i32_ty],
            [llvm_anyint_ty, llvm_float_ty, llvm_v3f32_ty, llvm_anyvector_ty,
             LLVMMatchType<1>, llvm_v4i32_ty],
            [IntrReadMem, IntrWillReturn]>;

//===----------------------------------------------------------------------===//
// GFX11 Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.permlane64 <src0>
def int_amdgcn_permlane64 :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn]>;

def int_amdgcn_ds_add_gs_reg_rtn :
  ClangBuiltin<"__builtin_amdgcn_ds_add_gs_reg_rtn">,
  Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn]>;

def int_amdgcn_ds_sub_gs_reg_rtn :
  ClangBuiltin<"__builtin_amdgcn_ds_sub_gs_reg_rtn">,
  Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn]>;

// WMMA (Wave Matrix Multiply-Accumulate) intrinsics
//
// These operations perform a matrix multiplication and accumulation of
// the form: D = A * B + C .

class AMDGPUWmmaIntrinsic<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      AB,               // %A
      AB,               // %B
      LLVMMatchType<0>, // %C
    ],
    [IntrNoMem, IntrConvergent, IntrWillReturn]
>;

class AMDGPUWmmaIntrinsicOPSEL<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      AB,               // %A
      AB,               // %B
      LLVMMatchType<0>, // %C
      llvm_i1_ty,       // %high
    ],
    [IntrNoMem, IntrConvergent, IntrWillReturn, ImmArg<ArgIndex<3>>]
>;

class AMDGPUWmmaIntrinsicIU<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      llvm_i1_ty,       // %A_sign
      AB,               // %A
      llvm_i1_ty,       // %B_sign
      AB,               // %B
      LLVMMatchType<0>, // %C
      llvm_i1_ty,       // %clamp
    ],
    [IntrNoMem, IntrConvergent, IntrWillReturn,
     ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
>;

def int_amdgcn_wmma_f32_16x16x16_f16   : AMDGPUWmmaIntrinsic<llvm_v16f16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_f32_16x16x16_bf16  : AMDGPUWmmaIntrinsic<llvm_v16i16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_f16_16x16x16_f16   : AMDGPUWmmaIntrinsicOPSEL<llvm_v16f16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL<llvm_v16i16_ty, llvm_anyint_ty>;
def int_amdgcn_wmma_i32_16x16x16_iu8   : AMDGPUWmmaIntrinsicIU<llvm_v4i32_ty, llvm_anyint_ty>;
def int_amdgcn_wmma_i32_16x16x16_iu4   : AMDGPUWmmaIntrinsicIU<llvm_v2i32_ty, llvm_anyint_ty>;

//===----------------------------------------------------------------------===//
// Deep learning intrinsics.
//===----------------------------------------------------------------------===//

// f32 %r = llvm.amdgcn.fdot2(v2f16 %a, v2f16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2 :
  ClangBuiltin<"__builtin_amdgcn_fdot2">,
  Intrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// f16 %r = llvm.amdgcn.fdot2.f16.f16(v2f16 %a, v2f16 %b, f16 %c)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_f16_f16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_f16_f16">,
  Intrinsic<
    [llvm_half_ty],  // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_half_ty   // %c
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
  >;

// bf16 %r = llvm.amdgcn.fdot2.bf16.bf16(v2bf16 %a, v2bf16 %b, bf16 %c)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_bf16_bf16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_bf16_bf16">,
  Intrinsic<
    [llvm_i16_ty],   // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i16_ty    // %c
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]
  >;

// f32 %r = llvm.amdgcn.fdot2.f32.bf16(v2bf16 %a, v2bf16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_f32_bf16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_f32_bf16">,
  Intrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_sdot2 :
  ClangBuiltin<"__builtin_amdgcn_sdot2">,
  Intrinsic<
    [llvm_i32_ty],   // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_udot2 :
  ClangBuiltin<"__builtin_amdgcn_udot2">,
  Intrinsic<
    [llvm_i32_ty],   // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sdot4 :
  ClangBuiltin<"__builtin_amdgcn_sdot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_udot4 :
  ClangBuiltin<"__builtin_amdgcn_udot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sudot4(i1 %a_sign, v4i8 (as i32) %a, i1 %b_sign, v4i8 (as i32) %b, i32 %c, i1 %clamp)
// Treat input as signed (_sign = 1) or unsigned (_sign = 0).
// a[i in 0. . . 3] = (%a_sign ? a.i8[i] : promoteToSigned(a.u8[i]));
// b[i in 0. . . 3] = (%b_sign ? b.i8[i] : promoteToSigned(b.u8[i]));
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sudot4 :
  ClangBuiltin<"__builtin_amdgcn_sudot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i1_ty,  // %a_sign
      llvm_i32_ty, // %a
      llvm_i1_ty,  // %b_sign
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn,
     ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
  >;

// i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sdot8 :
  ClangBuiltin<"__builtin_amdgcn_sdot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_udot8 :
  ClangBuiltin<"__builtin_amdgcn_udot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sudot8(i1 %a_sign, v8i4 (as i32) %a, i1 %b_sign, v8i4 (as i32) %b, i32 %c, i1 %clamp)
// Treat input as signed (_sign = 1) or unsigned (_sign = 0).
// a[i in 0. . . 7] = (%a_sign ? a.i4[i] : promoteToSigned(a.u4[i]));
// b[i in 0. . . 7] = (%b_sign ? b.i4[i] : promoteToSigned(b.u4[i]));
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sudot8 :
  ClangBuiltin<"__builtin_amdgcn_sudot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i1_ty,  // %a_sign
      llvm_i32_ty, // %a
      llvm_i1_ty,  // %b_sign
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn,
     ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
  >;

//===----------------------------------------------------------------------===//
// gfx908 intrinsics
// ===----------------------------------------------------------------------===//

def int_amdgcn_global_atomic_fadd : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;

// llvm.amdgcn.mfma.*.* vdst, srcA, srcB, srcC, cbsz, abid, blgp
class AMDGPUMfmaIntrinsic<LLVMType DestTy, LLVMType SrcABTy> :
  ClangBuiltin<!subst("int", "__builtin", NAME)>,
  Intrinsic<[DestTy],
            [SrcABTy, SrcABTy, DestTy,
             llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrNoMem, IntrWillReturn,
             ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

def int_amdgcn_mfma_f32_32x32x1f32  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_16x16x1f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_4x4x1f32    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
def int_amdgcn_mfma_f32_32x32x2f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_16x16x4f32  : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
def int_amdgcn_mfma_f32_32x32x4f16  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_16x16x4f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_4x4x4f16    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_32x32x8f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_16x16x16f16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
def int_amdgcn_mfma_i32_32x32x4i8   : AMDGPUMfmaIntrinsic<llvm_v32i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_16x16x4i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_4x4x4i8     : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
def int_amdgcn_mfma_i32_32x32x8i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_16x16x16i8  : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
def int_amdgcn_mfma_f32_32x32x2bf16 : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_16x16x2bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_4x4x2bf16   : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_32x32x4bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_16x16x8bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;

//===----------------------------------------------------------------------===//
// gfx90a intrinsics
// ===----------------------------------------------------------------------===//

def int_amdgcn_global_atomic_fmin : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_global_atomic_fmax : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fadd   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmin   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmax   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;

def int_amdgcn_mfma_f32_32x32x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_16x16x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_4x4x4bf16_1k    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_32x32x8bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_16x16x16bf16_1k : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;

// Note: in gfx940 BLGP argument is replaced by NEG bitfield in the DGEMM MFMA.
//       Three bits corresponding to the neg modifier applied to the respective
//       source operand.
def int_amdgcn_mfma_f64_16x16x4f64 : AMDGPUMfmaIntrinsic<llvm_v4f64_ty,  llvm_double_ty>;
def int_amdgcn_mfma_f64_4x4x4f64   : AMDGPUMfmaIntrinsic<llvm_double_ty, llvm_double_ty>;

//===----------------------------------------------------------------------===//
// gfx940 intrinsics
// ===----------------------------------------------------------------------===//

// bf16 atomics use v2i16 argument since there is no bf16 data type in the llvm.
def int_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
def int_amdgcn_flat_atomic_fadd_v2bf16   : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
def int_amdgcn_ds_fadd_v2bf16 : Intrinsic<
    [llvm_v2i16_ty],
    [LLVMQualPointerType<llvm_v2i16_ty, 3>, llvm_v2i16_ty],
    [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>]>,
    ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;

def int_amdgcn_mfma_i32_16x16x32_i8  : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i64_ty>;
def int_amdgcn_mfma_i32_32x32x16_i8  : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i64_ty>;
def int_amdgcn_mfma_f32_16x16x8_xf32 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2f32_ty>;
def int_amdgcn_mfma_f32_32x32x4_xf32 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2f32_ty>;

// fp8/bf8 MFMA variants pack their A/B operands as i64.
class AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> :
  AMDGPUMfmaIntrinsic<DestTy, llvm_i64_ty>;

multiclass AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> {
  foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
    def NAME#"_"#kind : AMDGPUMFp8MfmaIntrinsic<DestTy>;
}

defm int_amdgcn_mfma_f32_16x16x32 : AMDGPUMFp8MfmaIntrinsic<llvm_v4f32_ty>;
defm int_amdgcn_mfma_f32_32x32x16 : AMDGPUMFp8MfmaIntrinsic<llvm_v16f32_ty>;

// llvm.amdgcn.smfmac.?32.* vdst, srcA, srcB, srcC, index, cbsz, abid
class AMDGPUMSmfmacIntrinsic<LLVMType DestTy, LLVMType SrcA, LLVMType SrcB> :
  ClangBuiltin<!subst("int", "__builtin", NAME)>,
  Intrinsic<[DestTy],
            [SrcA, SrcB, DestTy, llvm_i32_ty,
             llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrNoMem, IntrWillReturn,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

def int_amdgcn_smfmac_f32_16x16x32_f16  : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty, llvm_v8f16_ty>;
def int_amdgcn_smfmac_f32_32x32x16_f16  : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
def int_amdgcn_smfmac_f32_16x16x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty, llvm_v8i16_ty>;
def int_amdgcn_smfmac_f32_32x32x16_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
def int_amdgcn_smfmac_i32_16x16x64_i8   : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty,  llvm_v2i32_ty, llvm_v4i32_ty>;
def int_amdgcn_smfmac_i32_32x32x32_i8   : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;

// fp8/bf8 SMFMAC variants pack srcA as v2i32 and srcB as v4i32.
class AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> :
  AMDGPUMSmfmacIntrinsic<DestTy, llvm_v2i32_ty, llvm_v4i32_ty>;

multiclass AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> {
  foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
    def NAME#"_"#kind : AMDGPUMFp8SmfmacIntrinsic<DestTy>;
}

defm int_amdgcn_smfmac_f32_16x16x64 : AMDGPUMFp8SmfmacIntrinsic<llvm_v4f32_ty>;
defm int_amdgcn_smfmac_f32_32x32x32 : AMDGPUMFp8SmfmacIntrinsic<llvm_v16f32_ty>;

// llvm.amdgcn.cvt.f32.bf8 float vdst, int srcA, imm byte_sel [0..3]
// byte_sel selects byte from srcA.
def int_amdgcn_cvt_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_bf8">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.f32.fp8 float vdst, int srcA, imm byte_sel [0..3]
def int_amdgcn_cvt_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_fp8">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.f32.bf8 float2 vdst, int srcA, imm word_sel
// word_sel = 1 selects 2 high bytes, 0 selects 2 low bytes.
def int_amdgcn_cvt_pk_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_bf8">,
  Intrinsic<[llvm_v2f32_ty],
            [llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.f32.fp8 float2 vdst, int srcA, imm word_sel.
def int_amdgcn_cvt_pk_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_fp8">,
  Intrinsic<[llvm_v2f32_ty],
            [llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.bf8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
// word_sel = 1 selects 2 high bytes in the vdst, 0 selects 2 low bytes.
def int_amdgcn_cvt_pk_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_bf8_f32">,
  Intrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.pk.fp8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
def int_amdgcn_cvt_pk_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32">,
  Intrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.sr.bf8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
// byte_sel selects byte to write into vdst.
def int_amdgcn_cvt_sr_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f32">,
  Intrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.sr.fp8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
  Intrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]>;

//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
// ===----------------------------------------------------------------------===//

def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_i1_ty], [IntrConvergent, IntrWillReturn]
>;

def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_anyint_ty], [IntrConvergent, IntrWillReturn]
>;

def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
  [llvm_i1_ty, LLVMMatchType<0>],
  [IntrNoMem, IntrConvergent, IntrWillReturn]
>;

def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
  [llvm_anyint_ty], [IntrConvergent, IntrWillReturn]
>;

def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty],
  [IntrConvergent, IntrWillReturn]>;

// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;

// Emit 2.5 ulp, no denormal division. Should only be inserted by
// pass based on !fpmath metadata.
def int_amdgcn_fdiv_fast : Intrinsic<
  [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;

// Represent a relocation constant.
def int_amdgcn_reloc_constant : Intrinsic<
  [llvm_i32_ty], [llvm_metadata_ty],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
}