; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX1064,GFX10DEFWAVE %s

; NOTE(review): this file checks wave32 (GFX1032) vs wave64 (GFX1064) codegen
; on gfx1010. In wave32 the lane/condition/carry mask is a single 32-bit SGPR
; (vcc_lo, exec_lo, s_*_b32 operations); in wave64 it is an SGPR pair
; (vcc, exec, s_*_b64 operations). The last RUN line compiles with no explicit
; wavefrontsize feature and checks the GFX1032 patterns, i.e. the default wave
; size on gfx1010 behaves as wave32 (GFX10DEFWAVE prefix).

; A VOPC integer compare feeding a select: the compare writes vcc_lo / vcc
; and the v_cndmask reads it back.
; GCN-LABEL: {{^}}test_vopc_i32:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}}
define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %sel = select i1 %cmp, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_f32:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}}
define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %sel = select i1 %cmp, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; Compare feeding amdgcn.kill: the compare still targets the VCC register
; appropriate for the wave size.
; GCN-LABEL: {{^}}test_vopc_vcmp:
; GFX1032: v_cmp_nle_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
define amdgpu_ps void @test_vopc_vcmp(float %x) {
  %cmp = fcmp oge float %x, 0.0
  call void @llvm.amdgcn.kill(i1 %cmp)
  ret void
}

; GCN-LABEL: {{^}}test_vopc_2xf16:
; GFX1032: v_cmp_le_f16_sdwa [[SC:vcc_lo|s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
; GFX1064: v_cmp_le_f16_sdwa [[SC:vcc|s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid
  %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4
  %elt = extractelement <2 x half> %load, i32 1
  %cmp = fcmp ugt half %elt, 0.0
  %sel = select i1 %cmp, <2 x half> <half 1.0, half 1.0>, <2 x half> %load
  store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_class:
; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}}
define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x)
  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vcmp_vcnd_f16:
; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]

; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}}
define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 {
  %cmp = fcmp oeq half %x, 0x7FF0000000000000
  %sel = select i1 %cmp, half 1.0, half %x
  store half %sel, half addrspace(1)* %out, align 2
  ret void
}

; Two compares combined by a scalar boolean op: the mask AND/XOR/OR is
; s_*_b32 in wave32 and s_*_b64 in wave64.
; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}}
; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}}
; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %cmp2 = fcmp ult float %load, 1.0
  %and = and i1 %cmp, %cmp2
  %sel = select i1 %and, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}}
; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}}
; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %cmp2 = icmp slt i32 %load, 1
  %xor = xor i1 %cmp, %cmp2
  %sel = select i1 %xor, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or:
; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}}
; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}}
; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %load, 3
  %cmp2 = icmp ult i32 %load, 2
  %or = or i1 %cmp, %cmp2
  %sel = select i1 %or, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; Divergent branch: exec masking uses s_and_saveexec_b32/exec_lo in wave32
; and s_and_saveexec_b64/exec in wave64.
; GCN-LABEL: {{^}}test_mask_if:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %cmp = icmp ugt i32 %lid, 10
  br i1 %cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %arg, align 4
  br label %endif

endif:
  ret void
}

; GCN-LABEL: {{^}}test_loop_with_if:
; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}]
; GCN: s_cbranch_execz
; GCN: BB{{.*}}:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
; GCN: ; %bb.{{[0-9]+}}:
; GCN: BB{{.*}}:
; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}]
; GCN: ; %bb.{{[0-9]+}}:
; GCN: ; %bb.{{[0-9]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}]
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
; GCN: s_cbranch_execz BB
; GCN: ; %bb.{{[0-9]+}}:
; GCN: BB{{.*}}:
; GCN: s_endpgm
define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb2

bb1:
  ret void

bb2:
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ]
  %tmp4 = icmp slt i32 %tmp3, %tmp
  br i1 %tmp4, label %bb5, label %bb11

bb5:
  %tmp6 = sext i32 %tmp3 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  %tmp9 = icmp sgt i32 %tmp8, 10
  br i1 %tmp9, label %bb10, label %bb11

bb10:
  store i32 %tmp, i32 addrspace(1)* %tmp7, align 4
  br label %bb13

bb11:
  %tmp12 = sdiv i32 %tmp3, 2
  br label %bb13

bb13:
  %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ]
  %tmp15 = add nsw i32 %tmp14, 1
  %tmp16 = icmp slt i32 %tmp14, 255
  br i1 %tmp16, label %bb2, label %bb1
}

; GCN-LABEL: {{^}}test_loop_with_if_else_break:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
; GCN: ; %bb.{{[0-9]+}}: ; %.preheader
; GCN: BB{{.*}}:

; GCN: global_store_dword
; GFX1032: s_or_b32 [[MASK0:s[0-9]+]], [[MASK0]], vcc_lo
; GFX1064: s_or_b64 [[MASK0:s\[[0-9:]+\]]], [[MASK0]], vcc
; GFX1032: s_andn2_b32 [[MASK1:s[0-9]+]], [[MASK1]], exec_lo
; GFX1064: s_andn2_b64 [[MASK1:s\[[0-9:]+\]]], [[MASK1]], exec
; GFX1032: s_and_b32 [[MASK0]], [[MASK0]], exec_lo
; GFX1064: s_and_b64 [[MASK0]], [[MASK0]], exec
; GFX1032: s_or_b32 [[MASK1]], [[MASK1]], [[MASK0]]
; GFX1064: s_or_b64 [[MASK1]], [[MASK1]], [[MASK0]]
; GCN: BB{{.*}}: ; %Flow
; GFX1032: s_and_b32 [[TMP0:s[0-9]+]], exec_lo, [[MASK1]]
; GFX1064: s_and_b64 [[TMP0:s\[[0-9:]+\]]], exec, [[MASK1]]
; GFX1032: s_or_b32 [[ACC:s[0-9]+]], [[TMP0]], [[ACC]]
; GFX1064: s_or_b64 [[ACC:s\[[0-9:]+\]]], [[TMP0]], [[ACC]]
; GFX1032: s_andn2_b32 exec_lo, exec_lo, [[ACC]]
; GFX1064: s_andn2_b64 exec, exec, [[ACC]]
; GCN: s_cbranch_execz
; GCN: BB{{.*}}:

; GFX1032: s_or_b32 [[MASK1]], [[MASK1]], exec_lo
; GFX1064: s_or_b64 [[MASK1]], [[MASK1]], exec
; GCN: global_load_dword [[LOAD:v[0-9]+]]
; GFX1032: v_cmp_gt_i32_e32 vcc_lo, 11, [[LOAD]]
; GFX1064: v_cmp_gt_i32_e32 vcc, 11, [[LOAD]]
define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = icmp eq i32 %tmp, 0
  br i1 %tmp1, label %.loopexit, label %.preheader

.preheader:
  br label %bb2

bb2:
  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  %tmp7 = icmp sgt i32 %tmp6, 10
  br i1 %tmp7, label %bb8, label %.loopexit

bb8:
  store i32 %tmp, i32 addrspace(1)* %tmp5, align 4
  %tmp9 = add nuw nsw i32 %tmp3, 1
  %tmp10 = icmp ult i32 %tmp9, 256
  %tmp11 = icmp ult i32 %tmp9, %tmp
  %tmp12 = and i1 %tmp10, %tmp11
  br i1 %tmp12, label %bb2, label %.loopexit

.loopexit:
  ret void
}

; 64-bit add/sub: the carry chain flows through vcc_lo (wave32) or vcc
; (wave64).
; GCN-LABEL: {{^}}test_addc_vop2b:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = add nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subbrev_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subb_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %arg1, %tmp4
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; i64 udiv expands to a long carry-propagating add/sub sequence; every carry
; operand is vcc_lo / s<N> in wave32 and vcc / s[N:M] in wave64.
; GCN-LABEL: {{^}}test_udiv64:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, [[SDST:s[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]]
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_subrev_co_ci_u32_e64 v{{[0-9]+}}, s{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, [[SDST:s\[[0-9:]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]]
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_subrev_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, vcc
; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc
define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
bb:
  %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1
  %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8
  %tmp2 = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = udiv i64 %tmp1, %tmp2
  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2
  store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8
  ret void
}

; div_scale's second (condition) result is a 32-bit vs 64-bit SGPR mask.
; GCN-LABEL: {{^}}test_div_scale_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_scale_f64:
; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load volatile double, double addrspace(1)* %gep.0, align 8
  %b = load volatile double, double addrspace(1)* %gep.1, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

; mad_i64_i32/mad_u64_u32 carry-out SGPR is 32-bit vs 64-bit.
; GCN-LABEL: {{^}}test_mad_i64_i32:
; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %sext0 = sext i32 %arg0 to i64
  %sext1 = sext i32 %arg1 to i64
  %mul = mul i64 %sext0, %sext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}

; GCN-LABEL: {{^}}test_mad_u64_u32:
; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %sext0 = zext i32 %arg0 to i64
  %sext1 = zext i32 %arg1 to i64
  %mul = mul i64 %sext0, %sext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}

; div_fmas reads its i1 operand from VCC; the boolean is materialized with
; s_cselect into vcc_lo / vcc.
; GCN-LABEL: {{^}}test_div_fmas_f32:
; GCN: s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN: v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f64:
; GCN: s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
  %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
  store double %result, double addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}}
; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}}
; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}}
; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}}

; GCN: load_dword [[LOAD:v[0-9]+]]
; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]]

; GCN: BB{{[0-9_]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]]
; GFX1064: s_or_b64 exec, exec, [[SAVE]]
; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
  %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2

  %a = load float, float addrspace(1)* %gep.a
  %b = load float, float addrspace(1)* %gep.b
  %c = load float, float addrspace(1)* %gep.c

  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %bb, label %exit

bb:
  %val = load volatile i32, i32 addrspace(1)* %dummy
  %cmp1 = icmp ne i32 %val, 0
  br label %exit

exit:
  %cond = phi i1 [false, %entry], [%cmp1, %bb]
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
  store float %result, float addrspace(1)* %gep.out, align 4
  ret void
}

; GCN-LABEL: {{^}}fdiv_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}

; GCN-NOT: vcc
; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
  %fdiv = fdiv float %a, %b
  store float %fdiv, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_br_cc_f16:
; GFX1032: v_cmp_nlt_f16_e32 vcc_lo,
; GFX1032: s_and_b32 vcc_lo, exec_lo, vcc_lo
; GFX1064: v_cmp_nlt_f16_e32 vcc,
; GFX1064: s_and_b64 vcc, exec, vcc{{$}}
; GCN-NEXT: s_cbranch_vccnz
define amdgpu_kernel void @test_br_cc_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %fcmp = fcmp olt half %a.val, %b.val
  br i1 %fcmp, label %one, label %two

one:
  store half %a.val, half addrspace(1)* %r
  ret void

two:
  store half %b.val, half addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}test_brcc_i1:
; GCN: s_bitcmp0_b32 s{{[0-9]+}}, 0
; GCN-NEXT: s_cbranch_scc1
define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
  %cmp0 = icmp ne i1 %val, 0
  br i1 %cmp0, label %store, label %end

store:
  store i32 222, i32 addrspace(1)* %out
  ret void

end:
  ret void
}

; GCN-LABEL: {{^}}test_preserve_condition_undef_flag:
; GFX1032-DAG: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032-DAG: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0
; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}}
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]]
; GFX1064-DAG: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064-DAG: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0
; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}]
; GFX1064: s_and_b64 vcc, exec, [[OR2]]
; GCN: s_cbranch_vccnz
define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 {
bb0:
  %tmp = icmp sgt i32 %arg1, 4
  %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
  %tmp4 = select i1 %undef, float %arg, float 1.000000e+00
  %tmp5 = fcmp ogt float %arg2, 0.000000e+00
  %tmp6 = fcmp olt float %arg2, 1.000000e+00
  %tmp7 = fcmp olt float %arg, %tmp4
  %tmp8 = and i1 %tmp5, %tmp6
  %tmp9 = and i1 %tmp8, %tmp7
  br i1 %tmp9, label %bb1, label %bb2

bb1:
  store volatile i32 0, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop:
; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1
; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1
; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:                                              ; preds = %Flow, %bb
  %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  br i1 %cmp0, label %bb4, label %Flow

bb4:                                              ; preds = %bb1
  %load = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp sge i32 %tmp, %load
  br label %Flow

Flow:                                             ; preds = %bb4, %bb1
  %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
  %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
  br i1 %tmp3, label %bb1, label %bb9

bb9:                                              ; preds = %Flow
  store volatile i32 7, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo
; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; set.inactive flips exec (exec_lo vs exec) around the write to the
; inactive lanes.
; GCN-LABEL: {{^}}test_set_inactive:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 {
  %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42)
  store i32 %tmp, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_set_inactive_64:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 {
  %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0)
  store i64 %tmp, i64 addrspace(1)* %out
  ret void
}

; kill(false) zeroes exec directly.
; GCN-LABEL: {{^}}test_kill_i1_terminator_float:
; GFX1032: s_mov_b32 exec_lo, 0
; GFX1064: s_mov_b64 exec, 0
define amdgpu_ps void @test_kill_i1_terminator_float() #0 {
  call void @llvm.amdgcn.kill(i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_kill_i1_terminator_i1:
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_or_b32 [[OR:s[0-9]+]],
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[OR]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]],
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[OR]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
  %c1 = icmp slt i32 %a, %b
  %c2 = icmp slt i32 %c, %d
  %x = or i1 %c1, %c2
  call void @llvm.amdgcn.kill(i1 %x)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_loop_vcc:
; GFX1032: v_cmp_lt_f32_e32 vcc_lo,
; GFX1064: v_cmp_lt_f32_e32 vcc,
; GCN: s_cbranch_vccz
define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
entry:
  br label %loop

loop:
  %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ]
  %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ]
  %cc = fcmp ogt float %ctr.iv, 7.0
  br i1 %cc, label %break, label %body

body:
  %c.iv0 = extractelement <4 x float> %c.iv, i32 0
  %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
  %ctr.next = fadd float %ctr.iv, 2.0
  br label %loop

break:
  ret <4 x float> %c.iv
}

; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
; WWM/WQM exec-mask tests below: in wave32 the exec mask lives in a single
; SGPR (exec_lo) and is saved/restored with *_b32 ops; in wave64 it is an
; SGPR pair (exec) using *_b64 ops.

; GCN-LABEL: {{^}}test_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 32
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}

; Same as test_wwm1 but via the non-deprecated strict.wwm intrinsic; the
; expected exec save/restore sequence is identical.
; GCN-LABEL: {{^}}test_strict_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_strict_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_strict_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_strict_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 32
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}


; GCN-LABEL: {{^}}test_wqm1:
; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]]
; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, [[ORIG]]
define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 {
main_body:
  %inst23 = extractelement <2 x float> %pos, i32 0
  %inst24 = extractelement <2 x float> %pos, i32 1
  %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0)
  %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0)
  %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0)
  %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0)
  %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0)
  ret <4 x float> %tex
}

; GCN-LABEL: {{^}}test_wqm2:
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}]
define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 {
main_body:
  %src0 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i32 0, i32 0)
  %src1 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i32 0, i32 0)
  %out = fadd float %src0, %src1
  %out.0 = bitcast float %out to i32
  %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0)
  %out.2 = bitcast i32 %out.1 to float
  ret float %out.2
}

; Ballot-style intrinsic tests: the fcmp/icmp intrinsics produce a lane mask,
; so the i64 variants need a zeroed high half in wave32, while wave64 fills
; both halves from the SGPR-pair compare result.

; GCN-LABEL: {{^}}test_intr_fcmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN: store_dwordx2 v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]]
; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN: store_dwordx2 v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) {
  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_fcmp_i32:
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GCN: store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i32:
; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GCN: store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) {
  %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_wqm_vote:
; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[WQM]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: v_cmp_neq_f32_e32 vcc, 0
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[WQM]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_ps void @test_wqm_vote(float %a) {
  %c1 = fcmp une float %a, 0.0
  %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
  call void @llvm.amdgcn.kill(i1 %c2)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_branch_true:
; GFX1032: s_mov_b32 vcc_lo, exec_lo
; GFX1064: s_mov_b64 vcc, exec
define amdgpu_kernel void @test_branch_true() #2 {
entry:
  br i1 true, label %for.end, label %for.body.lr.ph

for.body.lr.ph: ; preds = %entry
  br label %for.body

for.body: ; preds = %for.body, %for.body.lr.ph
  br i1 undef, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; GCN-LABEL: {{^}}test_ps_live:
; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo
; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
define amdgpu_ps float @test_ps_live() #0 {
  %live = call i1 @llvm.amdgcn.ps.live()
  %live.32 = zext i1 %live to i32
  %r = bitcast i32 %live.32 to float
  ret float %r
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]]
; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0
; GFX1064: s_and_b64 vcc, exec, [[C]]
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vgprblocks_w32_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 1
define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}test_vgprblocks_w64_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 2
define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}icmp64:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end: ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i64 %icmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then: ; preds = %if.end
  unreachable

if.end2: ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp64:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp64(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end: ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i64 %fcmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then: ; preds = %if.end
  unreachable

if.end2: ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}icmp32:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp32(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end: ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i32 %icmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then: ; preds = %if.end
  unreachable

if.end2: ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp32:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp32(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end: ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i32 %fcmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then: ; preds = %if.end
  unreachable

if.end2: ; preds = %if.end
  ret void
}

declare void @external_void_func_void() #1

; Test save/restore of VGPR needed for SGPR spilling.

; GCN-LABEL: {{^}}callee_no_stack_with_call:
; GCN: s_waitcnt
; GCN-NEXT: s_waitcnt_vscnt

; GFX1064-NEXT: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032-NEXT: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]]

; GCN-NEXT: v_writelane_b32 v40, s33, 2
; GCN: s_mov_b32 s33, s32
; GFX1064: s_addk_i32 s32, 0x400
; GFX1032: s_addk_i32 s32, 0x200


; GCN-DAG: v_writelane_b32 v40, s30, 0
; GCN-DAG: v_writelane_b32 v40, s31, 1
; GCN: s_swappc_b64
; GCN-DAG: v_readlane_b32 s4, v40, 0
; GCN-DAG: v_readlane_b32 s5, v40, 1


; GFX1064: s_addk_i32 s32, 0xfc00
; GFX1032: s_addk_i32 s32, 0xfe00
; GCN: v_readlane_b32 s33, v40, 2
; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]]], -1{{$}}
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack_with_call() #1 {
  call void @external_void_func_void()
  ret void
}


declare i32 @llvm.amdgcn.workitem.id.x()
declare float @llvm.fabs.f32(float)
declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)
; Remaining intrinsic declarations and attribute groups referenced by the
; tests above.
declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1)
declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1)
declare i1 @llvm.amdgcn.class.f32(float, i32)
declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare float @llvm.amdgcn.strict.wwm.f32(float)
declare float @llvm.amdgcn.wwm.f32(float)
declare i32 @llvm.amdgcn.wqm.i32(i32)
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32)
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32)
declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32)
declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32)
declare void @llvm.amdgcn.kill(i1)
declare i1 @llvm.amdgcn.wqm.vote(i1)
declare i1 @llvm.amdgcn.ps.live()
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #5

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone optnone noinline }
attributes #3 = { "target-features"="+wavefrontsize32" }
attributes #4 = { "target-features"="+wavefrontsize64" }
attributes #5 = { inaccessiblememonly nounwind }