; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse2 -show-mc-encoding | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=SKX

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c

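; For reference only (a hypothetical sketch, not checked by this file), the clang
; builtin wrappers that emit this kind of IR look roughly like:
;   __m128i add_sat_bytes(__m128i a, __m128i b) { return _mm_adds_epu8(a, b); }
;   __m128i sub_sat_words(__m128i a, __m128i b) { return _mm_subs_epu16(a, b); }

; Unsigned saturated add, written as the generic IR pattern: do the add, detect
; wraparound by comparing the result against an operand, and clamp to all-ones
; on overflow.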
define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_b:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_b:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <16 x i8> %a0, %a1
  %2 = icmp ugt <16 x i8> %a0, %1
  %3 = select <16 x i1> %2, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %1
  ret <16 x i8> %3
}

define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_w:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_w:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <8 x i16> %a0, %a1
  %2 = icmp ugt <8 x i16> %a0, %1
  %3 = select <8 x i1> %2, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %1
  ret <8 x i16> %3
}

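; Unsigned saturated subtract, written as the generic IR pattern: clamp the
; minuend to be at least the subtrahend (an unsigned max built from icmp/select),
; then subtract.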
define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_b:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_b:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <16 x i8> %a0, %a1
  %sel = select <16 x i1> %cmp, <16 x i8> %a0, <16 x i8> %a1
  %sub = sub <16 x i8> %sel, %a1
  ret <16 x i8> %sub
}

define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_w:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_w:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <8 x i16> %a0, %a1
  %sel = select <8 x i1> %cmp, <8 x i16> %a0, <8 x i16> %a1
  %sub = sub <8 x i16> %sel, %a1
  ret <8 x i16> %sub
}

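; The *_64 variants below repeat the same patterns on narrower 64-bit vectors
; (<8 x i8> and <4 x i16>); the checked output shows they are still lowered to the
; same 128-bit paddus/psubus instructions on XMM registers.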
define <8 x i8> @test_x86_sse2_paddus_b_64(<8 x i8> %a0, <8 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_b_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_b_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <8 x i8> %a0, %a1
  %2 = icmp ugt <8 x i8> %a0, %1
  %3 = select <8 x i1> %2, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> %1
  ret <8 x i8> %3
}

define <4 x i16> @test_x86_sse2_paddus_w_64(<4 x i16> %a0, <4 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_paddus_w_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_paddus_w_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %1 = add <4 x i16> %a0, %a1
  %2 = icmp ugt <4 x i16> %a0, %1
  %3 = select <4 x i1> %2, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> %1
  ret <4 x i16> %3
}

define <8 x i8> @test_x86_sse2_psubus_b_64(<8 x i8> %a0, <8 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_b_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_b_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <8 x i8> %a0, %a1
  %sel = select <8 x i1> %cmp, <8 x i8> %a0, <8 x i8> %a1
  %sub = sub <8 x i8> %sel, %a1
  ret <8 x i8> %sub
}

define <4 x i16> @test_x86_sse2_psubus_w_64(<4 x i16> %a0, <4 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w_64:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse2_psubus_w_64:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse2_psubus_w_64:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %cmp = icmp ugt <4 x i16> %a0, %a1
  %sel = select <4 x i1> %cmp, <4 x i16> %a0, <4 x i16> %a1
  %sub = sub <4 x i16> %sel, %a1
  ret <4 x i16> %sub
}

; This test has a normal add and a saturating add.
; The inner add of %y and %z stays a plain paddw; the outer add folds into a
; saturating paddusw, as the checks below show.
define <8 x i16> @add_addusw(<8 x i16> %x, <8 x i16> %y, <8 x i16> %z) {
; SSE-LABEL: add_addusw:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddw %xmm2, %xmm1 ## encoding: [0x66,0x0f,0xfd,0xca]
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    retl ## encoding: [0xc3]
;
; AVX2-LABEL: add_addusw:
; AVX2:       ## %bb.0:
; AVX2-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0xfd,0xca]
; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX2-NEXT:    retl ## encoding: [0xc3]
;
; SKX-LABEL: add_addusw:
; SKX:       ## %bb.0:
; SKX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; SKX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; SKX-NEXT:    retl ## encoding: [0xc3]
  %a = add <8 x i16> %y, %z
  %b = add <8 x i16> %x, %a
  %c = icmp ugt <8 x i16> %a, %b
  %res = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %b
  ret <8 x i16> %res
}