1; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
2; There are no MMX operations here, so we use XMM or i64.
3
4; CHECK: ti8
5define void @ti8(double %a, double %b) nounwind {
6entry:
7        %tmp1 = bitcast double %a to <8 x i8>
8        %tmp2 = bitcast double %b to <8 x i8>
9        %tmp3 = add <8 x i8> %tmp1, %tmp2
10; CHECK:  paddw
11        store <8 x i8> %tmp3, <8 x i8>* null
12        ret void
13}
14
15; CHECK: ti16
16define void @ti16(double %a, double %b) nounwind {
17entry:
18        %tmp1 = bitcast double %a to <4 x i16>
19        %tmp2 = bitcast double %b to <4 x i16>
20        %tmp3 = add <4 x i16> %tmp1, %tmp2
21; CHECK:  paddd
22        store <4 x i16> %tmp3, <4 x i16>* null
23        ret void
24}
25
26; CHECK: ti32
27define void @ti32(double %a, double %b) nounwind {
28entry:
29        %tmp1 = bitcast double %a to <2 x i32>
30        %tmp2 = bitcast double %b to <2 x i32>
31        %tmp3 = add <2 x i32> %tmp1, %tmp2
32; CHECK:  paddq
33        store <2 x i32> %tmp3, <2 x i32>* null
34        ret void
35}
36
37; CHECK: ti64
38define void @ti64(double %a, double %b) nounwind {
39entry:
40        %tmp1 = bitcast double %a to <1 x i64>
41        %tmp2 = bitcast double %b to <1 x i64>
42        %tmp3 = add <1 x i64> %tmp1, %tmp2
43; CHECK:  addq
44        store <1 x i64> %tmp3, <1 x i64>* null
45        ret void
46}
47
48; MMX intrinsics calls get us MMX instructions.
49; CHECK: ti8a
50define void @ti8a(double %a, double %b) nounwind {
51entry:
52        %tmp1 = bitcast double %a to x86_mmx
53; CHECK: movdq2q
54        %tmp2 = bitcast double %b to x86_mmx
55; CHECK: movdq2q
56        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
57        store x86_mmx %tmp3, x86_mmx* null
58        ret void
59}
60
61; CHECK: ti16a
62define void @ti16a(double %a, double %b) nounwind {
63entry:
64        %tmp1 = bitcast double %a to x86_mmx
65; CHECK: movdq2q
66        %tmp2 = bitcast double %b to x86_mmx
67; CHECK: movdq2q
68        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
69        store x86_mmx %tmp3, x86_mmx* null
70        ret void
71}
72
73; CHECK: ti32a
74define void @ti32a(double %a, double %b) nounwind {
75entry:
76        %tmp1 = bitcast double %a to x86_mmx
77; CHECK: movdq2q
78        %tmp2 = bitcast double %b to x86_mmx
79; CHECK: movdq2q
80        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
81        store x86_mmx %tmp3, x86_mmx* null
82        ret void
83}
84
85; CHECK: ti64a
86define void @ti64a(double %a, double %b) nounwind {
87entry:
88        %tmp1 = bitcast double %a to x86_mmx
89; CHECK: movdq2q
90        %tmp2 = bitcast double %b to x86_mmx
91; CHECK: movdq2q
92        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
93        store x86_mmx %tmp3, x86_mmx* null
94        ret void
95}
96
97declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
98declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
99declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
100declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
101