; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX
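; All lanes of %a and %b are masked with 15, clearing every sign bit, so the
; signed-min pattern below is equivalent to an unsigned minimum. SSE2, which
; has pminub but not pminsb, can therefore select pminub; SSE4.1 and later
; targets use pminsb directly.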
define <16 x i8> @test_v16i8_nosignbit(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_nosignbit:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16i8_nosignbit:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT:    pand %xmm2, %xmm0
; SSE41-NEXT:    pand %xmm2, %xmm1
; SSE41-NEXT:    pminsb %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: test_v16i8_nosignbit:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE42-NEXT:    pand %xmm2, %xmm0
; SSE42-NEXT:    pand %xmm2, %xmm1
; SSE42-NEXT:    pminsb %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: test_v16i8_nosignbit:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <16 x i8> %a, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
  %2 = and <16 x i8> %b, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
  %3 = icmp slt <16 x i8> %1, %2
  %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
  ret <16 x i8> %4
}
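; Illustrative sketch (not part of the checked function): the icmp+select
; sequence above is the canonical signed-minimum pattern, and could
; equivalently be expressed with the llvm.smin intrinsic, e.g.
;   %m = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %1, <16 x i8> %2)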