# RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-vgpr-index-mode -run-pass=greedy -stress-regalloc=16 -o - %s | FileCheck -check-prefixes=GCN %s

# An interval for a register that was partially defined was split, creating
# a new use (a COPY) which was reached by the undef point. In particular,
# there was a subrange of the new register which was reached by an "undef"
# point. When the code in extendSegmentsToUses verified value numbers between
# the new and the old live ranges, it did not account for this kind of a
# situation and asserted expecting the old value to exist. For a PHI node
# it is legal to have a missing predecessor value as long as the end of
# the predecessor is jointly dominated by the undefs.
#
# A simplified form of this can be illustrated as
#
# bb.1:
#   %0:vreg_64 = IMPLICIT_DEF
#   ...
#   S_CBRANCH_SCC1 %bb.2, implicit $vcc
#   S_BRANCH %bb.3
#
# bb.2:
# ; predecessors: %bb.1, %bb.4
#   dead %1:vreg_64 = COPY %0:vreg_64 ; This is the point of the inserted split
#   ...
#   S_BRANCH %bb.5
#
# bb.3:
# ; predecessors: %bb.1
#   undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1
#   ...
#   S_BRANCH %bb.4
#
# bb.4:
# ; predecessors: %bb.3
#   ...
#   S_BRANCH %bb.2
#
# This test exposes the scenario which previously caused an assert.

---
name:            _amdgpu_ps_main
tracksRegLiveness: true
machineFunctionInfo:
  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
  stackPtrOffsetReg: $sgpr32
liveins:
  - { reg: '$vgpr2', virtual-reg: '%0' }
  - { reg: '$vgpr3', virtual-reg: '%1' }
  - { reg: '$vgpr4', virtual-reg: '%2' }
body: |
  bb.0:
    successors: %bb.1(0x40000000), %bb.2(0x40000000)
    liveins: $vgpr2, $vgpr3, $vgpr4
    %2:vgpr_32 = COPY $vgpr4
    %1:vgpr_32 = COPY $vgpr3
    %0:vgpr_32 = COPY $vgpr2
    S_CBRANCH_SCC0 %bb.2, implicit undef $scc

  bb.1:
    successors: %bb.5(0x80000000)
    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %3.sub1:vreg_128 = COPY %3.sub0
    %3.sub2:vreg_128 = COPY %3.sub0
    S_BRANCH %bb.5

  bb.2:
    successors: %bb.3(0x40000000), %bb.4(0x40000000)
    S_CBRANCH_SCC0 %bb.4, implicit undef $scc

  bb.3:
    successors: %bb.5(0x80000000)
    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %3.sub1:vreg_128 = COPY %3.sub0
    S_BRANCH %bb.5

  bb.4:
    successors: %bb.5(0x80000000)
    %3:vreg_128 = IMPLICIT_DEF

  bb.5:
    successors: %bb.6(0x40000000), %bb.22(0x40000000)
    %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    S_CBRANCH_SCC1 %bb.22, implicit undef $scc
    S_BRANCH %bb.6

  bb.6:
    successors: %bb.8(0x40000000), %bb.11(0x40000000)
    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $mode, implicit $exec
    dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $mode, implicit $exec
    undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $mode, implicit $exec
    undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $mode, implicit $exec
    undef %11.sub0:sgpr_256 = S_MOV_B32 0
    %11.sub1:sgpr_256 = COPY %11.sub0
    %11.sub2:sgpr_256 = COPY %11.sub0
    %11.sub3:sgpr_256 = COPY %11.sub0
    %11.sub4:sgpr_256 = COPY %11.sub0
    %11.sub5:sgpr_256 = COPY %11.sub0
    %11.sub6:sgpr_256 = COPY %11.sub0
    %11.sub7:sgpr_256 = COPY %11.sub0
    %12:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %9, %11, undef %13:sgpr_128, 15, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
    %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
    %15:vreg_128 = IMPLICIT_DEF
    S_CBRANCH_SCC1 %bb.8, implicit undef $scc
    S_BRANCH %bb.11

  bb.7:
    successors: %bb.13(0x80000000)

    ; In reality we are checking that this code doesn't assert when splitting
    ; and inserting a spill. Here we just check that the point where the error
    ; occurs we see a correctly generated spill.
    ; GCN-LABEL: bb.7:
    ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, implicit $exec

    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    %5:vgpr_32 = IMPLICIT_DEF
    S_BRANCH %bb.13

  bb.8:
    successors: %bb.9(0x40000000), %bb.10(0x40000000)
    S_CBRANCH_SCC0 %bb.10, implicit undef $scc

  bb.9:
    successors: %bb.12(0x80000000)

    ; GCN-LABEL: bb.9:
    ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, implicit $exec

    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    S_BRANCH %bb.12

  bb.10:
    successors: %bb.12(0x80000000)

    ; GCN-LABEL: bb.10:
    ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, implicit $exec

    undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    S_BRANCH %bb.12

  bb.11:
    successors: %bb.7(0x40000000), %bb.13(0x40000000)
    %16:sreg_64 = V_CMP_NE_U32_e64 0, %14, implicit $exec
    %17:sreg_64 = S_AND_B64 $exec, %16, implicit-def dead $scc
    $vcc = COPY %17
    S_CBRANCH_VCCNZ %bb.7, implicit $vcc
    S_BRANCH %bb.13

  bb.12:
    successors: %bb.11(0x80000000)
    %14:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %5:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
    S_BRANCH %bb.11

  bb.13:
    successors: %bb.15(0x40000000), %bb.14(0x40000000)

    %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $mode, implicit $exec
    %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    %20:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %21:sgpr_128, 1040, 0, 0 :: (dereferenceable invariant load 16)
    %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $mode, implicit $exec
    %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    %24:vgpr_32 = COPY %20.sub3
    %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $mode, implicit $exec
    %26:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %27:sgpr_128, 1056, 0, 0 :: (dereferenceable invariant load 16)
    %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $mode, implicit $exec
    %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $mode, implicit $exec
    %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $mode, implicit $exec
    %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $mode, implicit $exec
    %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $mode, implicit $exec
    %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $mode, implicit $exec
    %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $mode, implicit $exec
    %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $mode, implicit $exec
    %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $mode, implicit $exec
    %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $mode, implicit $exec
    %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $mode, implicit $exec
    %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    %38:sreg_64_xexec = V_CMP_NE_U32_e64 0, %5, implicit $exec
    %39:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %38, implicit $exec
    V_CMP_NE_U32_e32 1, %39, implicit-def $vcc, implicit $exec
    $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
    %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $mode, implicit $exec
    S_CBRANCH_VCCZ %bb.15, implicit $vcc

  bb.14:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.15:
    successors: %bb.16(0x40000000), %bb.18(0x40000000)
    %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
    %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $mode, implicit $exec
    %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $mode, implicit $exec
    %44:sreg_64 = S_AND_B64 %43, %43, implicit-def dead $scc
    %45:sreg_64 = S_AND_B64 %42, %42, implicit-def dead $scc
    %46:sreg_64 = S_AND_B64 %45, %44, implicit-def dead $scc
    %47:sreg_64 = COPY $exec, implicit-def $exec
    %48:sreg_64 = S_AND_B64 %47, %46, implicit-def dead $scc
    $exec = S_MOV_B64_term %48
    SI_MASK_BRANCH %bb.18, implicit $exec
    S_BRANCH %bb.16

  bb.16:
    successors: %bb.18(0x80000000)
    S_BRANCH %bb.18

  bb.17:
    successors: %bb.21(0x40000000), %bb.23(0x40000000)
    %49:sreg_64 = V_CMP_NE_U32_e64 0, %5, implicit $exec
    %50:sreg_64 = S_AND_B64 $exec, %49, implicit-def dead $scc
    %51:vreg_128 = IMPLICIT_DEF
    $vcc = COPY %50
    S_CBRANCH_VCCNZ %bb.21, implicit $vcc
    S_BRANCH %bb.23

  bb.18:
    successors: %bb.20(0x40000000), %bb.19(0x40000000)
    $exec = S_OR_B64 $exec, %47, implicit-def $scc
    %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $mode, implicit $exec
    %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $mode, implicit $exec
    %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $mode, implicit $exec
    %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $mode, implicit $exec
    %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $mode, implicit $exec
    %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $mode, implicit $exec
    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %56:vgpr_32 = V_MOV_B32_e32 981668463, implicit $exec
    %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $mode, implicit $exec
    %58:sreg_64 = S_AND_B64 $exec, %57, implicit-def dead $scc
    $vcc = COPY %58
    S_CBRANCH_VCCZ %bb.20, implicit $vcc

  bb.19:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.20:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.21:
    successors: %bb.23(0x80000000)
    %59:sreg_32 = S_MOV_B32 0
    undef %51.sub0:vreg_128 = COPY %59
    S_BRANCH %bb.23

  bb.22:
    successors: %bb.24(0x80000000)
    S_BRANCH %bb.24

  bb.23:
    successors: %bb.22(0x80000000)
    undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $mode, implicit $exec
    %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $mode, implicit $exec
    undef %61.sub0:sgpr_256 = S_MOV_B32 0
    %61.sub1:sgpr_256 = COPY %61.sub0
    %61.sub2:sgpr_256 = COPY %61.sub0
    %61.sub3:sgpr_256 = COPY %61.sub0
    %61.sub4:sgpr_256 = COPY %61.sub0
    %61.sub5:sgpr_256 = COPY %61.sub0
    %61.sub6:sgpr_256 = COPY %61.sub0
    %61.sub7:sgpr_256 = COPY %61.sub0
    %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec
    %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $mode, implicit $exec
    %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $mode, implicit $exec
    %64:vgpr_32 = IMAGE_LOAD_V1_V2 %60, %61, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
    %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $mode, implicit $exec
    %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $mode, implicit $exec
    %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $mode, implicit $exec
    %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $mode, implicit $exec
    %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $mode, implicit $exec
    %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $mode, implicit $exec
    S_BRANCH %bb.22

  bb.24:
    %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $mode, implicit $exec
    %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, 0, implicit $mode, implicit $exec
    EXP 0, undef %71:vgpr_32, %69, undef %72:vgpr_32, undef %73:vgpr_32, -1, -1, 15, implicit $exec
    S_ENDPGM 0
...