; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=i386-pc-win32 -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=WIN64 %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck -check-prefix=X64 %s
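; Checks for the intel_ocl_bicc calling convention with AVX enabled: how
; vector and integer arguments are passed on each target, which ymm registers
; the callee must preserve, and the resulting prolog/epilog spills and reloads.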

declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float>*)
declare <16 x float> @func_float16(<16 x float>, <16 x float>)
declare i32 @func_int(i32, i32)
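; The callees are only declared here, so llc has to emit real call sites.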

; WIN64-LABEL: testf16_inp
; WIN64: vaddps  {{.*}}, {{%ymm[0-1]}}
; WIN64: vaddps  {{.*}}, {{%ymm[0-1]}}
; WIN64: leaq    {{.*}}(%rsp), %rcx
; WIN64: call
; WIN64: ret

; X32-LABEL: testf16_inp
; X32: movl    %eax, (%esp)
; X32: vaddps  {{.*}}, {{%ymm[0-1]}}
; X32: vaddps  {{.*}}, {{%ymm[0-1]}}
; X32: call
; X32: ret

; X64-LABEL: testf16_inp
; X64: vaddps  {{.*}}, {{%ymm[0-1]}}
; X64: vaddps  {{.*}}, {{%ymm[0-1]}}
; X64: leaq    {{.*}}(%rsp), %rdi
; X64: call
; X64: ret

; test calling conventions - input parameters
define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
  %y = alloca <16 x float>, align 16
  %x = fadd <16 x float> %a, %b
  %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
  %2 = load <16 x float>, <16 x float>* %y, align 16
  %3 = fadd <16 x float> %2, %1
  ret <16 x float> %3
}

; test calling conventions - preserved registers

; preserved ymm6-ymm15
; WIN64-LABEL: testf16_regs
; WIN64: call
; WIN64: vaddps  {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: vaddps  {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: ret

; preserved ymm8-ymm15
; X64-LABEL: testf16_regs
; X64: call
; X64: vaddps  {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; X64: vaddps  {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; X64: ret

define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
  %y = alloca <16 x float>, align 16
  %x = fadd <16 x float> %a, %b
  %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
  %2 = load <16 x float>, <16 x float>* %y, align 16
  %3 = fadd <16 x float> %1, %b
  %4 = fadd <16 x float> %2, %3
  ret <16 x float> %4
}

; test calling conventions - prolog and epilog
; @test_prolog_epilog is itself intel_ocl_bicc, so its own prolog and epilog
; must spill and reload the convention's callee-saved ymm registers around the
; inner call.
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}}     # 32-byte Spill
; WIN64: call
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload
; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}}     # 32-byte Reload

; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp)  ## 32-byte Folded Spill
; X64: call
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
; X64: vmovups {{.*}}(%rsp), {{%ymm([8-9]|1[0-5])}} ## 32-byte Folded Reload
define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x float> %b) nounwind {
  %c = call <16 x float> @func_float16(<16 x float> %a, <16 x float> %b)
  ret <16 x float> %c
}

; test functions with integer parameters
; pass parameters on stack for the 32-bit platform
; X32: movl {{.*}}, 4(%esp)
; X32: movl {{.*}}, (%esp)
; X32: call
; X32: addl {{.*}}, %eax

; pass parameters in registers for the 64-bit platform
; X64: leal {{.*}}, %edi
; X64: movl {{.*}}, %esi
; X64: call
; X64: addl {{.*}}, %eax
define i32 @test_int(i32 %a, i32 %b) nounwind {
  %c1 = add i32 %a, %b
  %c2 = call intel_ocl_bicc i32 @func_int(i32 %c1, i32 %a)
  %c = add i32 %c2, %b
  ret i32 %c
}

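; The 64-bit targets should not emit vzeroupper around these calls, presumably
; because intel_ocl_bicc preserves the full ymm registers across calls; the
; 32-bit target is still expected to insert it before each call.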
; WIN64-LABEL: test_float4
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64: ret

; X64-LABEL: test_float4
; X64-NOT: vzeroupper
; X64: call
; X64-NOT: vzeroupper
; X64: call
; X64: ret

; X32-LABEL: test_float4
; X32: vzeroupper
; X32: call
; X32: vzeroupper
; X32: call
; X32: ret

declare <4 x float> @func_float4(<4 x float>, <4 x float>, <4 x float>)

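; test_float4 splits each 256-bit argument into 128-bit halves, calls
; func_float4 on each half, and reassembles the two results into one ymm value.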
define <8 x float> @test_float4(<8 x float> %a, <8 x float> %b, <8 x float> %c) nounwind readnone {
entry:
  %0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %1 = shufflevector <8 x float> %b, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = shufflevector <8 x float> %c, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %call.i = tail call intel_ocl_bicc <4 x float> @func_float4(<4 x float> %0, <4 x float> %1, <4 x float> %2) nounwind
  %3 = shufflevector <4 x float> %call.i, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %4 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %5 = shufflevector <8 x float> %b, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %6 = shufflevector <8 x float> %c, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %call.i2 = tail call intel_ocl_bicc <4 x float> @func_float4(<4 x float> %4, <4 x float> %5, <4 x float> %6) nounwind
  %7 = shufflevector <4 x float> %call.i2, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %8 = shufflevector <8 x float> %3, <8 x float> %7, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  ret <8 x float> %8
}