; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s

; FP is in CSR range, modified.
define hidden fastcc void @callee_has_fp() #1 {
; CHECK-LABEL: callee_has_fp:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s4, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_addk_i32 s32, 0x200
; CHECK-NEXT:    v_mov_b32_e32 v0, 1
; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_addk_i32 s32, 0xfe00
; CHECK-NEXT:    s_mov_b32 s33, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %alloca = alloca i32, addrspace(5)
  store volatile i32 1, i32 addrspace(5)* %alloca
  ret void
}

; Has no stack objects, but introduces them due to the CSR spill. We
; see the FP modified in the callee with IPRA. We should not have
; redundant spills of s33 or assert.
define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s8, s33
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_addk_i32 s32, 0x400
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 s[6:7], s[30:31]
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT:    s_addk_i32 s32, 0xfc00
; CHECK-NEXT:    s_mov_b32 s33, s8
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[6:7]
bb:
  call fastcc void @callee_has_fp()
  call void asm sideeffect "; clobber csr v40", "~{v40}"()
  ret void
}

define amdgpu_kernel void @kernel_call() {
; CHECK-LABEL: kernel_call:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s7
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    s_endpgm
bb:
  tail call fastcc void @csr_vgpr_spill_fp_callee()
  ret void
}

; Same, except with a tail call.
define internal fastcc void @csr_vgpr_spill_fp_tailcall_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_tailcall_callee:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; clobber csr v40
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; CHECK-NEXT:    v_writelane_b32 v1, s33, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
; CHECK-NEXT:    v_readlane_b32 s33, v1, 0
; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
; CHECK-NEXT:    s_setpc_b64 s[4:5]
bb:
  call void asm sideeffect "; clobber csr v40", "~{v40}"()
  tail call fastcc void @callee_has_fp()
  ret void
}

define amdgpu_kernel void @kernel_tailcall() {
; CHECK-LABEL: kernel_tailcall:
; CHECK:       ; %bb.0: ; %bb
; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
; CHECK-NEXT:    s_add_u32 s0, s0, s7
; CHECK-NEXT:    s_addc_u32 s1, s1, 0
; CHECK-NEXT:    s_getpc_b64 s[4:5]
; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_tailcall_callee@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_tailcall_callee@rel32@hi+12
; CHECK-NEXT:    s_mov_b32 s32, 0
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; CHECK-NEXT:    s_endpgm
bb:
  tail call fastcc void @csr_vgpr_spill_fp_tailcall_callee()
  ret void
}

; #0: no frame pointer requested; FP only materializes if CSR spills force it.
; #1: frame pointer always required, so s33 (FP) is modified in the callee.
attributes #0 = { "frame-pointer"="none" noinline }
attributes #1 = { "frame-pointer"="all" noinline }