; RUN: llc < %s -march=sparcv9 -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s
; RUN: llc < %s -march=sparcv9 | FileCheck %s -check-prefix=OPT

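; The first RUN line disables the delay slot filler and the leaf procedure
; optimization, so every function sets up a register window (save/restore)
; and takes its incoming arguments in %i registers. The second RUN line uses
; the default pipeline: leaf functions keep their arguments in %o registers
; and return with a "jmp %o7+8" whose delay slot holds the final instruction.
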
; CHECK-LABEL: ret2:
; CHECK: or %g0, %i1, %i0

; OPT-LABEL: ret2:
; OPT: jmp %o7+8
; OPT: or %g0, %o1, %o0
define i64 @ret2(i64 %a, i64 %b) {
  ret i64 %b
}

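; 64-bit shifts should select the extended sllx/srax forms rather than the
; 32-bit sll/sra instructions.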
; CHECK-LABEL: shl_imm:
; CHECK: sllx %i0, 7, %i0

; OPT-LABEL: shl_imm:
; OPT: jmp %o7+8
; OPT: sllx %o0, 7, %o0
define i64 @shl_imm(i64 %a) {
  %x = shl i64 %a, 7
  ret i64 %x
}

; CHECK-LABEL: sra_reg:
; CHECK: srax %i0, %i1, %i0

; OPT-LABEL: sra_reg:
; OPT: jmp %o7+8
; OPT: srax %o0, %o1, %o0
define i64 @sra_reg(i64 %a, i64 %b) {
  %x = ashr i64 %a, %b
  ret i64 %x
}

; Immediate materialization. Many of these patterns could actually be merged
; into the restore instruction:
;
;     restore %g0, %g0, %o0
;
; CHECK-LABEL: ret_imm0:
; CHECK: or %g0, 0, %i0

; OPT-LABEL: ret_imm0:
; OPT: jmp %o7+8
; OPT: or %g0, 0, %o0
define i64 @ret_imm0() {
  ret i64 0
}

; CHECK-LABEL: ret_simm13:
; CHECK: or %g0, -4096, %i0

; OPT-LABEL: ret_simm13:
; OPT: jmp %o7+8
; OPT: or %g0, -4096, %o0
define i64 @ret_simm13() {
  ret i64 -4096
}

; CHECK-LABEL: ret_sethi:
; CHECK: sethi 4, %i0
; CHECK-NOT: or
; CHECK: restore

; OPT-LABEL: ret_sethi:
; OPT: jmp %o7+8
; OPT: sethi 4, %o0
define i64 @ret_sethi() {
  ret i64 4096
}

; CHECK-LABEL: ret_sethi_or:
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: or [[R]], 1, %i0

; OPT-LABEL: ret_sethi_or:
; OPT: sethi 4, [[R:%[go][0-7]]]
; OPT: jmp %o7+8
; OPT: or [[R]], 1, %o0

define i64 @ret_sethi_or() {
  ret i64 4097
}

; CHECK-LABEL: ret_nimm33:
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: xor [[R]], -4, %i0

; OPT-LABEL: ret_nimm33:
; OPT: sethi 4, [[R:%[go][0-7]]]
; OPT: jmp %o7+8
; OPT: xor [[R]], -4, %o0

define i64 @ret_nimm33() {
  ret i64 -4100
}

; CHECK-LABEL: ret_bigimm:
; CHECK: sethi
; CHECK: sethi
define i64 @ret_bigimm() {
  ret i64 6800754272627607872
}

; CHECK-LABEL: ret_bigimm2:
; CHECK: sethi 1048576
define i64 @ret_bigimm2() {
  ret i64 4611686018427387904 ; 0x4000000000000000
}

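; Register-register ALU chains. The xor with -1 feeding the and should fold
; into a single andn instruction.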
; CHECK-LABEL: reg_reg_alu:
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
; CHECK: andn [[R1]], %i0, %i0
define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, %y
  %b = sub i64 %a, %z
  %c = xor i64 %x, -1
  %d = and i64 %b, %c
  ret i64 %d
}

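; Immediates that fit in the signed 13-bit field fold directly into the ALU
; instructions.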
; CHECK-LABEL: reg_imm_alu:
; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
; CHECK: xor [[R0]], 2, %i0
define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, -5
  %b = xor i64 %a, 2
  ret i64 %b
}

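; Loads at each width: ldx for i64, ld for a zero-extending i32 load, ldsw
; for a sign-extending i32 load, and ldsh for a sign-extending i16 load,
; paired with the matching truncating stores.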
; CHECK-LABEL: loads:
; CHECK: ldx [%i0]
; CHECK: stx %
; CHECK: ld [%i1]
; CHECK: st %
; CHECK: ldsw [%i2]
; CHECK: stx %
; CHECK: ldsh [%i3]
; CHECK: sth %
define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
  %a = load i64* %p
  %ai = add i64 1, %a
  store i64 %ai, i64* %p
  %b = load i32* %q
  %b2 = zext i32 %b to i64
  %bi = trunc i64 %ai to i32
  store i32 %bi, i32* %q
  %c = load i32* %r
  %c2 = sext i32 %c to i64
  store i64 %ai, i64* %p
  %d = load i16* %s
  %d2 = sext i16 %d to i64
  %di = trunc i64 %ai to i16
  store i16 %di, i16* %s

  %x1 = add i64 %a, %b2
  %x2 = add i64 %c2, %d2
  %x3 = add i64 %x1, %x2
  ret i64 %x3
}

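; An i1 load zero-extends, so it selects an unsigned byte load.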
; CHECK-LABEL: load_bool:
; CHECK: ldub [%i0], %i0
define i64 @load_bool(i1* %p) {
  %a = load i1* %p
  %b = zext i1 %a to i64
  ret i64 %b
}

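; Reg+imm addressing: the simm13 offsets from the getelementptrs should fold
; into the load and store addresses.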
; CHECK-LABEL: stores:
; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
; CHECK: stx [[R]], [%i0+16]
; CHECK: st [[R]], [%i1+-8]
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
  %p1 = getelementptr i64* %p, i64 1
  %p2 = getelementptr i64* %p, i64 2
  %pv = load i64* %p1
  store i64 %pv, i64* %p2

  %q2 = getelementptr i32* %q, i32 -2
  %qv = trunc i64 %pv to i32
  store i32 %qv, i32* %q2

  %r2 = getelementptr i16* %r, i16 20
  %rv = trunc i64 %pv to i16
  store i16 %rv, i16* %r2

  %s2 = getelementptr i8* %s, i8 -20
  %sv = trunc i64 %pv to i8
  store i8 %sv, i8* %s2

  ret void
}

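; i8 shifts are promoted; the value and the shift count both come from the
; same zero-extended byte load, so one register serves as both operands.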
; CHECK-LABEL: promote_shifts:
; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
; CHECK: sll [[R]], [[R]], %i0
define i8 @promote_shifts(i8* %p) {
  %L24 = load i8* %p
  %L32 = load i8* %p
  %B36 = shl i8 %L24, %L32
  ret i8 %B36
}

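; 64-bit multiply and divide select the mulx/sdivx/udivx instructions.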
; CHECK-LABEL: multiply:
; CHECK: mulx %i0, %i1, %i0
define i64 @multiply(i64 %a, i64 %b) {
  %r = mul i64 %a, %b
  ret i64 %r
}

; CHECK-LABEL: signed_divide:
; CHECK: sdivx %i0, %i1, %i0
define i64 @signed_divide(i64 %a, i64 %b) {
  %r = sdiv i64 %a, %b
  ret i64 %r
}

; CHECK-LABEL: unsigned_divide:
; CHECK: udivx %i0, %i1, %i0
define i64 @unsigned_divide(i64 %a, i64 %b) {
  %r = udiv i64 %a, %b
  ret i64 %r
}

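; Materializing the address of a stack object through a frame index. There
; are no instruction checks; this just needs to compile without crashing.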
define void @access_fi() {
entry:
  %b = alloca [32 x i8], align 1
  %arraydecay = getelementptr inbounds [32 x i8]* %b, i64 0, i64 0
  call void @g(i8* %arraydecay)
  ret void
}

declare void @g(i8*)

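; The i64 setcc expands to a compare and a conditional move on the %xcc
; (64-bit) condition codes.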
; CHECK-LABEL: expand_setcc:
; CHECK: cmp %i0, 1
; CHECK: movl %xcc, 1,
define i32 @expand_setcc(i64 %a) {
  %cond = icmp sle i64 %a, 0
  %cast2 = zext i1 %cond to i32
  %RV = sub i32 1, %cast2
  ret i32 %RV
}

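; Clobbering every allocatable register with inline asm forces the i64
; argument to be spilled and reloaded with the 64-bit stx/ldx.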
; CHECK-LABEL: spill_i64:
; CHECK: stx
; CHECK: ldx
define i64 @spill_i64(i64 %x) {
  call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7}"()
  ret i64 %x
}

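; i64 <-> f64 bitcasts have no direct move between the integer and floating
; point registers here, so they round-trip through a stack slot.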
; CHECK-LABEL: bitcast_i64_f64:
; CHECK: std
; CHECK: ldx
define i64 @bitcast_i64_f64(double %x) {
  %y = bitcast double %x to i64
  ret i64 %y
}

; CHECK-LABEL: bitcast_f64_i64:
; CHECK: stx
; CHECK: ldd
define double @bitcast_f64_i64(i64 %x) {
  %y = bitcast i64 %x to double
  ret double %y
}

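; Storing constant zero should use %g0 as the source register instead of
; materializing a zero.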
; CHECK-LABEL: store_zero:
; CHECK: stx %g0, [%i0]
; CHECK: stx %g0, [%i1+8]

; OPT-LABEL: store_zero:
; OPT: stx %g0, [%o0]
; OPT: stx %g0, [%o1+8]
define i64 @store_zero(i64* nocapture %a, i64* nocapture %b) {
entry:
  store i64 0, i64* %a, align 8
  %0 = getelementptr inbounds i64* %b, i32 1
  store i64 0, i64* %0, align 8
  ret i64 0
}

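; Of the bit manipulation intrinsics, only ctpop's popc is checked directly;
; ctlz, cttz, and bswap just need to lower successfully.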
; CHECK-LABEL: bit_ops:
; CHECK: popc

; OPT-LABEL: bit_ops:
; OPT: popc

define i64 @bit_ops(i64 %arg) {
entry:
  %0 = tail call i64 @llvm.ctpop.i64(i64 %arg)
  %1 = tail call i64 @llvm.ctlz.i64(i64 %arg, i1 true)
  %2 = tail call i64 @llvm.cttz.i64(i64 %arg, i1 true)
  %3 = tail call i64 @llvm.bswap.i64(i64 %arg)
  %4 = add i64 %0, %1
  %5 = add i64 %2, %3
  %6 = add i64 %4, %5
  ret i64 %6
}

declare i64 @llvm.ctpop.i64(i64) nounwind readnone
declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
declare i64 @llvm.bswap.i64(i64) nounwind readnone