; Like frame-03.ll, but for z13.  In this case we have 16 more registers
; available (%v16-%v31).
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

; This function should require all FPRs, but no other spill slots.
; We need to save and restore 8 of the 16 FPRs, so the frame size
; should be exactly 8 * 8 = 64.  The CFA offset is 160
; (the caller-allocated part of the frame) + 64.
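; To spell out the layout implied by the CHECK lines below: with a CFA
; offset of 224, a save slot at N(%r15) corresponds to CFI offset N - 224,
; so %f8 at 56(%r15) is at CFI offset -168 and %f15 at 0(%r15) is at -224.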
define void @f1(double *%ptr) {
; CHECK-LABEL: f1:
; CHECK: aghi %r15, -64
; CHECK: .cfi_def_cfa_offset 224
; CHECK: std %f8, 56(%r15)
; CHECK: std %f9, 48(%r15)
; CHECK: std %f10, 40(%r15)
; CHECK: std %f11, 32(%r15)
; CHECK: std %f12, 24(%r15)
; CHECK: std %f13, 16(%r15)
; CHECK: std %f14, 8(%r15)
; CHECK: std %f15, 0(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f13, -208
; CHECK: .cfi_offset %f14, -216
; CHECK: .cfi_offset %f15, -224
; CHECK-DAG: ld %f0, 0(%r2)
; CHECK-DAG: ld %f7, 0(%r2)
; CHECK-DAG: ld %f8, 0(%r2)
; CHECK-DAG: ld %f15, 0(%r2)
; CHECK-DAG: vlrepg %v16, 0(%r2)
; CHECK-DAG: vlrepg %v23, 0(%r2)
; CHECK-DAG: vlrepg %v24, 0(%r2)
; CHECK-DAG: vlrepg %v31, 0(%r2)
; CHECK: ld %f8, 56(%r15)
; CHECK: ld %f9, 48(%r15)
; CHECK: ld %f10, 40(%r15)
; CHECK: ld %f11, 32(%r15)
; CHECK: ld %f12, 24(%r15)
; CHECK: ld %f13, 16(%r15)
; CHECK: ld %f14, 8(%r15)
; CHECK: ld %f15, 0(%r15)
; CHECK: aghi %r15, 64
; CHECK: br %r14
  %l0 = load volatile double, double *%ptr
  %l1 = load volatile double, double *%ptr
  %l2 = load volatile double, double *%ptr
  %l3 = load volatile double, double *%ptr
  %l4 = load volatile double, double *%ptr
  %l5 = load volatile double, double *%ptr
  %l6 = load volatile double, double *%ptr
  %l7 = load volatile double, double *%ptr
  %l8 = load volatile double, double *%ptr
  %l9 = load volatile double, double *%ptr
  %l10 = load volatile double, double *%ptr
  %l11 = load volatile double, double *%ptr
  %l12 = load volatile double, double *%ptr
  %l13 = load volatile double, double *%ptr
  %l14 = load volatile double, double *%ptr
  %l15 = load volatile double, double *%ptr
  %l16 = load volatile double, double *%ptr
  %l17 = load volatile double, double *%ptr
  %l18 = load volatile double, double *%ptr
  %l19 = load volatile double, double *%ptr
  %l20 = load volatile double, double *%ptr
  %l21 = load volatile double, double *%ptr
  %l22 = load volatile double, double *%ptr
  %l23 = load volatile double, double *%ptr
  %l24 = load volatile double, double *%ptr
  %l25 = load volatile double, double *%ptr
  %l26 = load volatile double, double *%ptr
  %l27 = load volatile double, double *%ptr
  %l28 = load volatile double, double *%ptr
  %l29 = load volatile double, double *%ptr
  %l30 = load volatile double, double *%ptr
  %l31 = load volatile double, double *%ptr
  %acc0 = fsub double %l0, %l0
  %acc1 = fsub double %l1, %acc0
  %acc2 = fsub double %l2, %acc1
  %acc3 = fsub double %l3, %acc2
  %acc4 = fsub double %l4, %acc3
  %acc5 = fsub double %l5, %acc4
  %acc6 = fsub double %l6, %acc5
  %acc7 = fsub double %l7, %acc6
  %acc8 = fsub double %l8, %acc7
  %acc9 = fsub double %l9, %acc8
  %acc10 = fsub double %l10, %acc9
  %acc11 = fsub double %l11, %acc10
  %acc12 = fsub double %l12, %acc11
  %acc13 = fsub double %l13, %acc12
  %acc14 = fsub double %l14, %acc13
  %acc15 = fsub double %l15, %acc14
  %acc16 = fsub double %l16, %acc15
  %acc17 = fsub double %l17, %acc16
  %acc18 = fsub double %l18, %acc17
  %acc19 = fsub double %l19, %acc18
  %acc20 = fsub double %l20, %acc19
  %acc21 = fsub double %l21, %acc20
  %acc22 = fsub double %l22, %acc21
  %acc23 = fsub double %l23, %acc22
  %acc24 = fsub double %l24, %acc23
  %acc25 = fsub double %l25, %acc24
  %acc26 = fsub double %l26, %acc25
  %acc27 = fsub double %l27, %acc26
  %acc28 = fsub double %l28, %acc27
  %acc29 = fsub double %l29, %acc28
  %acc30 = fsub double %l30, %acc29
  %acc31 = fsub double %l31, %acc30
  store volatile double %acc0, double *%ptr
  store volatile double %acc1, double *%ptr
  store volatile double %acc2, double *%ptr
  store volatile double %acc3, double *%ptr
  store volatile double %acc4, double *%ptr
  store volatile double %acc5, double *%ptr
  store volatile double %acc6, double *%ptr
  store volatile double %acc7, double *%ptr
  store volatile double %acc8, double *%ptr
  store volatile double %acc9, double *%ptr
  store volatile double %acc10, double *%ptr
  store volatile double %acc11, double *%ptr
  store volatile double %acc12, double *%ptr
  store volatile double %acc13, double *%ptr
  store volatile double %acc14, double *%ptr
  store volatile double %acc15, double *%ptr
  store volatile double %acc16, double *%ptr
  store volatile double %acc17, double *%ptr
  store volatile double %acc18, double *%ptr
  store volatile double %acc19, double *%ptr
  store volatile double %acc20, double *%ptr
  store volatile double %acc21, double *%ptr
  store volatile double %acc22, double *%ptr
  store volatile double %acc23, double *%ptr
  store volatile double %acc24, double *%ptr
  store volatile double %acc25, double *%ptr
  store volatile double %acc26, double *%ptr
  store volatile double %acc27, double *%ptr
  store volatile double %acc28, double *%ptr
  store volatile double %acc29, double *%ptr
  store volatile double %acc30, double *%ptr
  store volatile double %acc31, double *%ptr
  ret void
}

; Like f1, but requires one fewer FPR.  We allocate in numerical order,
; so %f15 is the one that gets dropped.
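; Worked out from the CHECK lines below: seven call-saved FPRs give a
; 7 * 8 = 56-byte frame and a CFA offset of 160 + 56 = 216, so %f8 at
; 48(%r15) is at CFI offset -168 and %f14 at 0(%r15) is at -216.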
define void @f2(double *%ptr) {
; CHECK-LABEL: f2:
; CHECK: aghi %r15, -56
; CHECK: .cfi_def_cfa_offset 216
; CHECK: std %f8, 48(%r15)
; CHECK: std %f9, 40(%r15)
; CHECK: std %f10, 32(%r15)
; CHECK: std %f11, 24(%r15)
; CHECK: std %f12, 16(%r15)
; CHECK: std %f13, 8(%r15)
; CHECK: std %f14, 0(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f13, -208
; CHECK: .cfi_offset %f14, -216
; CHECK-NOT: %v15
; CHECK-NOT: %f15
; CHECK: ld %f8, 48(%r15)
; CHECK: ld %f9, 40(%r15)
; CHECK: ld %f10, 32(%r15)
; CHECK: ld %f11, 24(%r15)
; CHECK: ld %f12, 16(%r15)
; CHECK: ld %f13, 8(%r15)
; CHECK: ld %f14, 0(%r15)
; CHECK: aghi %r15, 56
; CHECK: br %r14
  %l0 = load volatile double, double *%ptr
  %l1 = load volatile double, double *%ptr
  %l2 = load volatile double, double *%ptr
  %l3 = load volatile double, double *%ptr
  %l4 = load volatile double, double *%ptr
  %l5 = load volatile double, double *%ptr
  %l6 = load volatile double, double *%ptr
  %l7 = load volatile double, double *%ptr
  %l8 = load volatile double, double *%ptr
  %l9 = load volatile double, double *%ptr
  %l10 = load volatile double, double *%ptr
  %l11 = load volatile double, double *%ptr
  %l12 = load volatile double, double *%ptr
  %l13 = load volatile double, double *%ptr
  %l14 = load volatile double, double *%ptr
  %l16 = load volatile double, double *%ptr
  %l17 = load volatile double, double *%ptr
  %l18 = load volatile double, double *%ptr
  %l19 = load volatile double, double *%ptr
  %l20 = load volatile double, double *%ptr
  %l21 = load volatile double, double *%ptr
  %l22 = load volatile double, double *%ptr
  %l23 = load volatile double, double *%ptr
  %l24 = load volatile double, double *%ptr
  %l25 = load volatile double, double *%ptr
  %l26 = load volatile double, double *%ptr
  %l27 = load volatile double, double *%ptr
  %l28 = load volatile double, double *%ptr
  %l29 = load volatile double, double *%ptr
  %l30 = load volatile double, double *%ptr
  %l31 = load volatile double, double *%ptr
  %acc0 = fsub double %l0, %l0
  %acc1 = fsub double %l1, %acc0
  %acc2 = fsub double %l2, %acc1
  %acc3 = fsub double %l3, %acc2
  %acc4 = fsub double %l4, %acc3
  %acc5 = fsub double %l5, %acc4
  %acc6 = fsub double %l6, %acc5
  %acc7 = fsub double %l7, %acc6
  %acc8 = fsub double %l8, %acc7
  %acc9 = fsub double %l9, %acc8
  %acc10 = fsub double %l10, %acc9
  %acc11 = fsub double %l11, %acc10
  %acc12 = fsub double %l12, %acc11
  %acc13 = fsub double %l13, %acc12
  %acc14 = fsub double %l14, %acc13
  %acc16 = fsub double %l16, %acc14
  %acc17 = fsub double %l17, %acc16
  %acc18 = fsub double %l18, %acc17
  %acc19 = fsub double %l19, %acc18
  %acc20 = fsub double %l20, %acc19
  %acc21 = fsub double %l21, %acc20
  %acc22 = fsub double %l22, %acc21
  %acc23 = fsub double %l23, %acc22
  %acc24 = fsub double %l24, %acc23
  %acc25 = fsub double %l25, %acc24
  %acc26 = fsub double %l26, %acc25
  %acc27 = fsub double %l27, %acc26
  %acc28 = fsub double %l28, %acc27
  %acc29 = fsub double %l29, %acc28
  %acc30 = fsub double %l30, %acc29
  %acc31 = fsub double %l31, %acc30
  store volatile double %acc0, double *%ptr
  store volatile double %acc1, double *%ptr
  store volatile double %acc2, double *%ptr
  store volatile double %acc3, double *%ptr
  store volatile double %acc4, double *%ptr
  store volatile double %acc5, double *%ptr
  store volatile double %acc6, double *%ptr
  store volatile double %acc7, double *%ptr
  store volatile double %acc8, double *%ptr
  store volatile double %acc9, double *%ptr
  store volatile double %acc10, double *%ptr
  store volatile double %acc11, double *%ptr
  store volatile double %acc12, double *%ptr
  store volatile double %acc13, double *%ptr
  store volatile double %acc14, double *%ptr
  store volatile double %acc16, double *%ptr
  store volatile double %acc17, double *%ptr
  store volatile double %acc18, double *%ptr
  store volatile double %acc19, double *%ptr
  store volatile double %acc20, double *%ptr
  store volatile double %acc21, double *%ptr
  store volatile double %acc22, double *%ptr
  store volatile double %acc23, double *%ptr
  store volatile double %acc24, double *%ptr
  store volatile double %acc25, double *%ptr
  store volatile double %acc26, double *%ptr
  store volatile double %acc27, double *%ptr
  store volatile double %acc28, double *%ptr
  store volatile double %acc29, double *%ptr
  store volatile double %acc30, double *%ptr
  store volatile double %acc31, double *%ptr
  ret void
}

; Like f1, but should require only one call-saved FPR.
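; Worked out from the CHECK lines below: a single call-saved FPR gives an
; 8-byte frame and a CFA offset of 160 + 8 = 168, with %f8 saved at
; 0(%r15) (CFI offset -168).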
define void @f3(double *%ptr) {
; CHECK-LABEL: f3:
; CHECK: aghi %r15, -8
; CHECK: .cfi_def_cfa_offset 168
; CHECK: std %f8, 0(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK-NOT: {{%[fv]9}}
; CHECK-NOT: {{%[fv]1[0-5]}}
; CHECK: ld %f8, 0(%r15)
; CHECK: aghi %r15, 8
; CHECK: br %r14
  %l0 = load volatile double, double *%ptr
  %l1 = load volatile double, double *%ptr
  %l2 = load volatile double, double *%ptr
  %l3 = load volatile double, double *%ptr
  %l4 = load volatile double, double *%ptr
  %l5 = load volatile double, double *%ptr
  %l6 = load volatile double, double *%ptr
  %l7 = load volatile double, double *%ptr
  %l8 = load volatile double, double *%ptr
  %l16 = load volatile double, double *%ptr
  %l17 = load volatile double, double *%ptr
  %l18 = load volatile double, double *%ptr
  %l19 = load volatile double, double *%ptr
  %l20 = load volatile double, double *%ptr
  %l21 = load volatile double, double *%ptr
  %l22 = load volatile double, double *%ptr
  %l23 = load volatile double, double *%ptr
  %l24 = load volatile double, double *%ptr
  %l25 = load volatile double, double *%ptr
  %l26 = load volatile double, double *%ptr
  %l27 = load volatile double, double *%ptr
  %l28 = load volatile double, double *%ptr
  %l29 = load volatile double, double *%ptr
  %l30 = load volatile double, double *%ptr
  %l31 = load volatile double, double *%ptr
  %acc0 = fsub double %l0, %l0
  %acc1 = fsub double %l1, %acc0
  %acc2 = fsub double %l2, %acc1
  %acc3 = fsub double %l3, %acc2
  %acc4 = fsub double %l4, %acc3
  %acc5 = fsub double %l5, %acc4
  %acc6 = fsub double %l6, %acc5
  %acc7 = fsub double %l7, %acc6
  %acc8 = fsub double %l8, %acc7
  %acc16 = fsub double %l16, %acc8
  %acc17 = fsub double %l17, %acc16
  %acc18 = fsub double %l18, %acc17
  %acc19 = fsub double %l19, %acc18
  %acc20 = fsub double %l20, %acc19
  %acc21 = fsub double %l21, %acc20
  %acc22 = fsub double %l22, %acc21
  %acc23 = fsub double %l23, %acc22
  %acc24 = fsub double %l24, %acc23
  %acc25 = fsub double %l25, %acc24
  %acc26 = fsub double %l26, %acc25
  %acc27 = fsub double %l27, %acc26
  %acc28 = fsub double %l28, %acc27
  %acc29 = fsub double %l29, %acc28
  %acc30 = fsub double %l30, %acc29
  %acc31 = fsub double %l31, %acc30
  store volatile double %acc0, double *%ptr
  store volatile double %acc1, double *%ptr
  store volatile double %acc2, double *%ptr
  store volatile double %acc3, double *%ptr
  store volatile double %acc4, double *%ptr
  store volatile double %acc5, double *%ptr
  store volatile double %acc6, double *%ptr
  store volatile double %acc7, double *%ptr
  store volatile double %acc8, double *%ptr
  store volatile double %acc16, double *%ptr
  store volatile double %acc17, double *%ptr
  store volatile double %acc18, double *%ptr
  store volatile double %acc19, double *%ptr
  store volatile double %acc20, double *%ptr
  store volatile double %acc21, double *%ptr
  store volatile double %acc22, double *%ptr
  store volatile double %acc23, double *%ptr
  store volatile double %acc24, double *%ptr
  store volatile double %acc25, double *%ptr
  store volatile double %acc26, double *%ptr
  store volatile double %acc27, double *%ptr
  store volatile double %acc28, double *%ptr
  store volatile double %acc29, double *%ptr
  store volatile double %acc30, double *%ptr
  store volatile double %acc31, double *%ptr
  ret void
}

; This function should use all call-clobbered FPRs and vector registers
; but no call-saved ones.  It shouldn't need to create a frame.
define void @f4(double *%ptr) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r15
; CHECK-NOT: {{%[fv][89]}}
; CHECK-NOT: {{%[fv]1[0-5]}}
; CHECK: br %r14
  %l0 = load volatile double, double *%ptr
  %l1 = load volatile double, double *%ptr
  %l2 = load volatile double, double *%ptr
  %l3 = load volatile double, double *%ptr
  %l4 = load volatile double, double *%ptr
  %l5 = load volatile double, double *%ptr
  %l6 = load volatile double, double *%ptr
  %l7 = load volatile double, double *%ptr
  %l16 = load volatile double, double *%ptr
  %l17 = load volatile double, double *%ptr
  %l18 = load volatile double, double *%ptr
  %l19 = load volatile double, double *%ptr
  %l20 = load volatile double, double *%ptr
  %l21 = load volatile double, double *%ptr
  %l22 = load volatile double, double *%ptr
  %l23 = load volatile double, double *%ptr
  %l24 = load volatile double, double *%ptr
  %l25 = load volatile double, double *%ptr
  %l26 = load volatile double, double *%ptr
  %l27 = load volatile double, double *%ptr
  %l28 = load volatile double, double *%ptr
  %l29 = load volatile double, double *%ptr
  %l30 = load volatile double, double *%ptr
  %l31 = load volatile double, double *%ptr
  %acc0 = fsub double %l0, %l0
  %acc1 = fsub double %l1, %acc0
  %acc2 = fsub double %l2, %acc1
  %acc3 = fsub double %l3, %acc2
  %acc4 = fsub double %l4, %acc3
  %acc5 = fsub double %l5, %acc4
  %acc6 = fsub double %l6, %acc5
  %acc7 = fsub double %l7, %acc6
  %acc16 = fsub double %l16, %acc7
  %acc17 = fsub double %l17, %acc16
  %acc18 = fsub double %l18, %acc17
  %acc19 = fsub double %l19, %acc18
  %acc20 = fsub double %l20, %acc19
  %acc21 = fsub double %l21, %acc20
  %acc22 = fsub double %l22, %acc21
  %acc23 = fsub double %l23, %acc22
  %acc24 = fsub double %l24, %acc23
  %acc25 = fsub double %l25, %acc24
  %acc26 = fsub double %l26, %acc25
  %acc27 = fsub double %l27, %acc26
  %acc28 = fsub double %l28, %acc27
  %acc29 = fsub double %l29, %acc28
  %acc30 = fsub double %l30, %acc29
  %acc31 = fsub double %l31, %acc30
  store volatile double %acc0, double *%ptr
  store volatile double %acc1, double *%ptr
  store volatile double %acc2, double *%ptr
  store volatile double %acc3, double *%ptr
  store volatile double %acc4, double *%ptr
  store volatile double %acc5, double *%ptr
  store volatile double %acc6, double *%ptr
  store volatile double %acc7, double *%ptr
  store volatile double %acc16, double *%ptr
  store volatile double %acc17, double *%ptr
  store volatile double %acc18, double *%ptr
  store volatile double %acc19, double *%ptr
  store volatile double %acc20, double *%ptr
  store volatile double %acc21, double *%ptr
  store volatile double %acc22, double *%ptr
  store volatile double %acc23, double *%ptr
  store volatile double %acc24, double *%ptr
  store volatile double %acc25, double *%ptr
  store volatile double %acc26, double *%ptr
  store volatile double %acc27, double *%ptr
  store volatile double %acc28, double *%ptr
  store volatile double %acc29, double *%ptr
  store volatile double %acc30, double *%ptr
  store volatile double %acc31, double *%ptr
  ret void
}