; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck %s
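; t0: load an MMX value and add it to itself with paddq (single 64-bit lane); the result is returned in %rax.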
define i64 @t0(x86_mmx* %p) {
; CHECK-LABEL: t0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddq %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
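; t1: same pattern as t0, but with paddd (two 32-bit lanes).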
define i64 @t1(x86_mmx* %p) {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddd %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
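; t2: same pattern as t0, but with paddw (four 16-bit lanes).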
define i64 @t2(x86_mmx* %p) {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddw %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
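; t3: same pattern as t0, but with paddb (eight 8-bit lanes).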
define i64 @t3(x86_mmx* %p) {
; CHECK-LABEL: t3:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddb %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}
@R = external global x86_mmx
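; t4: saturating unsigned 16-bit add (paddusw) of two <1 x i64> arguments, stored to the global @R, followed by emms to clear MMX state.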
define void @t4(<1 x i64> %A, <1 x i64> %B) {
; CHECK-LABEL: t4:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movq %rdi, %mm0
; CHECK-NEXT:    movq %rsi, %mm1
; CHECK-NEXT:    paddusw %mm0, %mm1
; CHECK-NEXT:    movq _R@GOTPCREL(%rip), %rax
; CHECK-NEXT:    movq %mm1, (%rax)
; CHECK-NEXT:    emms
; CHECK-NEXT:    retq
entry:
  %tmp2 = bitcast <1 x i64> %A to x86_mmx
  %tmp3 = bitcast <1 x i64> %B to x86_mmx
  %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp2, x86_mmx %tmp3)
  store x86_mmx %tmp7, x86_mmx* @R
  tail call void @llvm.x86.mmx.emms()
  ret void
}
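; t5: no MMX intrinsic is involved, so the <2 x i32> build and bitcast should stay in SSE registers (punpckldq) without touching MMX state.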
define i64 @t5(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: t5:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movd %esi, %xmm0
; CHECK-NEXT:    movd %edi, %xmm1
; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    movq %xmm1, %rax
; CHECK-NEXT:    retq
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
  %conv = bitcast <2 x i32> %v1 to i64
  ret i64 %conv
}
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
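; t6: left-shift of a 64-bit value by 48 bits via the MMX psllq intrinsic.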
define <1 x i64> @t6(i64 %t) {
; CHECK-LABEL: t6:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq %rdi, %mm0
; CHECK-NEXT:    psllq $48, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
  %t0 = bitcast <1 x i64> %t1 to x86_mmx
  %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
  %t3 = bitcast x86_mmx %t2 to <1 x i64>
  ret <1 x i64> %t3
}
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()