; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s | FileCheck %s

target triple = "x86_64-unknown-unknown"

;
; VECTOR_REVERSE
;
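; These tests cover lowering of the fixed-width llvm.experimental.vector.reverse
; intrinsic on plain x86-64 (the RUN line passes no -mattr, so codegen is
; limited to baseline SSE2). For a fixed-width vector the intrinsic is
; equivalent to a shufflevector with a reversed index mask; the <4 x i32> case,
; for example, corresponds to:
;
;   %rev = shufflevector <4 x i32> %a, <4 x i32> undef,
;                        <4 x i32> <i32 3, i32 2, i32 1, i32 0>
;
; Without SSSE3 pshufb there is no general byte shuffle, so reverse_v16i8 is
; expected to unpack each byte half to words, reverse the words, and repack
; with packuswb.
;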

define <16 x i8> @reverse_v16i8(<16 x i8> %a) #0 {
; CHECK-LABEL: reverse_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    movdqa %xmm0, %xmm2
; CHECK-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; CHECK-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; CHECK-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; CHECK-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; CHECK-NEXT:    packuswb %xmm2, %xmm0
; CHECK-NEXT:    retq

  %res = call <16 x i8> @llvm.experimental.vector.reverse.v16i8(<16 x i8> %a)
  ret <16 x i8> %res
}

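; Reversing eight i16 elements is done with three shuffles: pshufd swaps the
; 64-bit halves, then pshuflw/pshufhw reverse the four words inside each half.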
define <8 x i16> @reverse_v8i16(<8 x i16> %a) #0 {
; CHECK-LABEL: reverse_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; CHECK-NEXT:    retq
  %res = call <8 x i16> @llvm.experimental.vector.reverse.v8i16(<8 x i16> %a)
  ret <8 x i16> %res
}

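; Elements of 32 or 64 bits can be reversed with a single immediate shuffle
; (pshufd for integers, shufps for floating point).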
define <4 x i32> @reverse_v4i32(<4 x i32> %a) #0 {
; CHECK-LABEL: reverse_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT:    retq
  %res = call <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32> %a)
  ret <4 x i32> %res
}

define <2 x i64> @reverse_v2i64(<2 x i64> %a) #0 {
; CHECK-LABEL: reverse_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    retq
  %res = call <2 x i64> @llvm.experimental.vector.reverse.v2i64(<2 x i64> %a)
  ret <2 x i64> %res
}

define <4 x float> @reverse_v4f32(<4 x float> %a) #0 {
; CHECK-LABEL: reverse_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.experimental.vector.reverse.v4f32(<4 x float> %a)
  ret <4 x float> %res
}

define <2 x double> @reverse_v2f64(<2 x double> %a) #0 {
; CHECK-LABEL: reverse_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    retq
  %res = call <2 x double> @llvm.experimental.vector.reverse.v2f64(<2 x double> %a)
  ret <2 x double> %res
}

; Verify promote type legalisation works as expected.
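; <2 x i8> is widened to a full XMM vector, so the two live bytes sit in the
; low 16-bit lane and are expected to be swapped with a pair of word shifts
; and a por.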
define <2 x i8> @reverse_v2i8(<2 x i8> %a) #0 {
; CHECK-LABEL: reverse_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movdqa %xmm0, %xmm1
; CHECK-NEXT:    psrlw $8, %xmm1
; CHECK-NEXT:    psllw $8, %xmm0
; CHECK-NEXT:    por %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = call <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8> %a)
  ret <2 x i8> %res
}

; Verify splitvec type legalisation works as expected.
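; The <8 x i32> vector is split across two XMM registers; each half is
; reversed with pshufd and the two halves are then swapped.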
define <8 x i32> @reverse_v8i32(<8 x i32> %a) #0 {
; CHECK-LABEL: reverse_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,2,1,0]
; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,2,1,0]
; CHECK-NEXT:    movdqa %xmm2, %xmm0
; CHECK-NEXT:    retq
  %res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a)
  ret <8 x i32> %res
}

; Verify splitvec type legalisation works as expected.
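; The <16 x float> vector is split across four XMM registers; each quarter is
; reversed with shufps and the registers are returned in the opposite order.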
define <16 x float> @reverse_v16f32(<16 x float> %a) #0 {
; CHECK-LABEL: reverse_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movaps %xmm1, %xmm4
; CHECK-NEXT:    movaps %xmm0, %xmm5
; CHECK-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
; CHECK-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
; CHECK-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,2],xmm1[1,0]
; CHECK-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,2],xmm0[1,0]
; CHECK-NEXT:    movaps %xmm3, %xmm0
; CHECK-NEXT:    movaps %xmm2, %xmm1
; CHECK-NEXT:    movaps %xmm4, %xmm2
; CHECK-NEXT:    movaps %xmm5, %xmm3
; CHECK-NEXT:    retq

  %res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a)
  ret <16 x float> %res
}


declare <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8>)
declare <16 x i8> @llvm.experimental.vector.reverse.v16i8(<16 x i8>)
declare <8 x i16> @llvm.experimental.vector.reverse.v8i16(<8 x i16>)
declare <4 x i32> @llvm.experimental.vector.reverse.v4i32(<4 x i32>)
declare <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32>)
declare <2 x i64> @llvm.experimental.vector.reverse.v2i64(<2 x i64>)
declare <8 x half> @llvm.experimental.vector.reverse.v8f16(<8 x half>)
declare <4 x float> @llvm.experimental.vector.reverse.v4f32(<4 x float>)
declare <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float>)
declare <2 x double> @llvm.experimental.vector.reverse.v2f64(<2 x double>)

attributes #0 = { nounwind }