; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s

; A uniform value in an SGPR should be used directly as a VALU operand
; instead of being copied into a VGPR first.
; CHECK-LABEL: {{^}}fold_sgpr:
; CHECK: v_add_i32_e32 v{{[0-9]+}}, vcc, s
define amdgpu_kernel void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) #1 {
entry:
  %tmp0 = icmp ne i32 %fold, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %offset = add i32 %fold, %id
  %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
  store i32 0, i32 addrspace(1)* %tmp1
  br label %endif

endif:
  ret void
}

; A folded constant (3 + 2 = 5) should be used as an inline immediate operand.
; CHECK-LABEL: {{^}}fold_imm:
; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
define amdgpu_kernel void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) #1 {
entry:
  %fold = add i32 3, 2
  %tmp0 = icmp ne i32 %cmp, 0
  br i1 %tmp0, label %if, label %endif

if:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %val = or i32 %id, %fold
  store i32 %val, i32 addrspace(1)* %out
  br label %endif

endif:
  ret void
}

; Adding a constant to a 64-bit value should fold the immediate into the
; scalar add/addc pair instead of materializing it with s_mov_b64.
; CHECK-LABEL: {{^}}fold_64bit_constant_add:
; CHECK-NOT: s_mov_b64
; FIXME: It would be better if we could use v_add here and drop the extra
; v_mov_b32 instructions.
; CHECK-DAG: s_add_u32 [[LO:s[0-9]+]], s{{[0-9]+}}, 1
; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},

define amdgpu_kernel void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) #1 {
entry:
  %tmp0 = add i64 %val, 1
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

; Inline constants should always be folded.

; CHECK-LABEL: {{^}}vector_inline:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}

define amdgpu_kernel void @vector_inline(<4 x i32> addrspace(1)* %out) #1 {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 5, i32 5, i32 5, i32 5>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

; Immediates with one use should be folded.
; CHECK-LABEL: {{^}}imm_one_use:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}

define amdgpu_kernel void @imm_one_use(i32 addrspace(1)* %out) #1 {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = xor i32 %tmp0, 100
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
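
; Non-inline immediates with multiple uses should be materialized in a
; register once and reused, rather than folded into each use.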
; CHECK-LABEL: {{^}}vector_imm:
; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}

define amdgpu_kernel void @vector_imm(<4 x i32> addrspace(1)* %out) #1 {
entry:
  %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp0, 1
  %tmp2 = add i32 %tmp0, 2
  %tmp3 = add i32 %tmp0, 3
  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
  %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
  ret void
}

; A subregister use operand should not be tied.
; CHECK-LABEL: {{^}}no_fold_tied_subregister:
; CHECK: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; CHECK: v_mac_f32_e32 v[[LO]], 0x41200000, v[[HI]]
; CHECK: buffer_store_dword v[[LO]]
define amdgpu_kernel void @no_fold_tied_subregister() #1 {
  %tmp1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tmp2 = extractelement <2 x float> %tmp1, i32 0
  %tmp3 = extractelement <2 x float> %tmp1, i32 1
  %tmp4 = fmul float %tmp3, 10.0
  %tmp5 = fadd float %tmp4, %tmp2
  store volatile float %tmp5, float addrspace(1)* undef
  ret void
}

; There should be exactly one folding on the same operand.
; CHECK-LABEL: {{^}}no_extra_fold_on_same_opnd:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @no_extra_fold_on_same_opnd() #1 {
entry:
  %s0 = load i32, i32 addrspace(5)* undef, align 4
  %s0.i64 = zext i32 %s0 to i64
  br label %for.body.i.i

for.body.i.i:
  %s1 = load i32, i32 addrspace(1)* undef, align 8
  %s1.i64 = sext i32 %s1 to i64
  %xor = xor i64 %s1.i64, %s0.i64
  %flag = icmp ult i64 %xor, 8
  br i1 %flag, label %if.then, label %if.else

if.then:
  unreachable

if.else:
  unreachable
}

declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }