; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

;
; testz(~X,Y) -> testc(X,Y)
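;
; vtestps sets ZF when the sign bits of (X & Y) are all zero and CF when the
; sign bits of (~X & Y) are all zero, so a testz of an inverted first operand
; is the same check as a testc on the original operands.
;
; Rough C-level sketch of the pattern being folded (illustrative only; the
; function name is made up and assumes #include <immintrin.h>):
;   int testz_not_first(__m128 c, __m128 d, int a, int b) {
;     __m128 ones = _mm_castsi128_ps(_mm_set1_epi32(-1));
;     __m128 notc = _mm_xor_ps(c, ones);     // ~c, as in the xor in the IR below
;     return _mm_testz_ps(notc, d) ? a : b;  // expected to fold to _mm_testc_ps(c, d)
;   }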
;

define i32 @testpsz_128_invert0(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsz_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %xmm1, %xmm0
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x float> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <4 x float>
  %t3 = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %t2, <4 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpsz_256_invert0(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsz_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %ymm1, %ymm0
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <8 x float> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <8 x float>
  %t3 = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %t2, <8 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testz(X,~Y) -> testc(Y,X)
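;
; Here the second operand is inverted: testz(X,~Y) checks that the sign bits
; of (X & ~Y) are all zero, which is exactly the CF condition of vtestps with
; the operands swapped, i.e. testc(Y,X).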
;

define i32 @testpsz_128_invert1(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsz_128_invert1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %xmm0, %xmm1
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x float> %d to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <4 x float>
  %t3 = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %c, <4 x float> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpsz_256_invert1(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsz_256_invert1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %ymm0, %ymm1
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <8 x float> %d to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <8 x float>
  %t3 = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %c, <8 x float> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testc(~X,Y) -> testz(X,Y)
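;
; Inverting the first operand of testc cancels the implicit NOT in the CF
; computation: testc(~X,Y) checks the sign bits of (X & Y), which is the ZF
; condition, i.e. testz(X,Y).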
;

define i32 @testpsc_128_invert0(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsc_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %xmm1, %xmm0
; CHECK-NEXT:    cmovnel %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x float> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <4 x float>
  %t3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %t2, <4 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpsc_256_invert0(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsc_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %ymm1, %ymm0
; CHECK-NEXT:    cmovnel %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <8 x float> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <8 x float>
  %t3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %t2, <8 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testnzc(~X,Y) -> testnzc(X,Y)
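;
; Inverting an operand only swaps the roles of ZF and CF, and the NZC
; condition (!ZF && !CF) is symmetric in the two flags, so the NOT can simply
; be dropped.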
;

define i32 @testpsnzc_128_invert0(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsnzc_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %xmm1, %xmm0
; CHECK-NEXT:    cmovbel %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x float> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <4 x float>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %t2, <4 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpsnzc_256_invert0(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpsnzc_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestps %ymm1, %ymm0
; CHECK-NEXT:    cmovbel %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <8 x float> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <8 x float>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %t2, <8 x float> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnone

declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readnone
declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readnone
declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind readnone