1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2                  < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI1
3; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi             < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI1
4; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2       < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI2
5; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,X86-BMI2,AVX2
6; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2                  < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI1
7; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi             < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI1
8; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2       < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI2
9; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,X64-BMI2,AVX2
10
11; We are looking for the following pattern here:
12;   (X & (C l>> Y)) ==/!= 0
13; It may be optimal to hoist the constant:
14;   ((X << Y) & C) ==/!= 0
15
16;------------------------------------------------------------------------------;
; A few scalar tests
18;------------------------------------------------------------------------------;
19
20; i8 scalar
21
; i8 sign bit: (X & (0x80 l>> Y)) == 0. Expected codegen hoists the constant,
; shifting X left instead and testing against the fixed mask 0x80 (-128).
define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    testb $-128, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %dil
; X64-NEXT:    testb $-128, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = lshr i8 128, %y ; constant is the i8 sign bit (0x80)
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}
45
; i8 lowest bit: (X & (1 l>> Y)) == 0. After hoisting, the test mask is the
; fixed constant 1 applied to X << Y.
define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_lowestbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    testb $1, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_lowestbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %dil
; X64-NEXT:    testb $1, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = lshr i8 1, %y ; constant is the lowest bit (0x01)
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}
69
; i8 middle bits: (X & (24 l>> Y)) == 0 with a contiguous run of set bits
; (0b0001'1000) in the middle of the value.
define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_bitsinmiddle_eq:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    testb $24, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_bitsinmiddle_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %dil
; X64-NEXT:    testb $24, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = lshr i8 24, %y ; constant 0x18: two adjacent middle bits
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}
93
94; i16 scalar
95
; i16 sign bit: (X & (0x8000 l>> Y)) == 0. BMI2 targets can use shlx to avoid
; the fixed-cl shift; the i16 op is widened to 32 bits.
define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testl $32768, %eax # imm = 0x8000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i16_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $32768, %eax # imm = 0x8000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i16_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testl $32768, %edi # imm = 0x8000
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i16_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $32768, %eax # imm = 0x8000
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i16 32768, %y ; constant is the i16 sign bit (0x8000)
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}
134
; i16 lowest bit: (X & (1 l>> Y)) == 0; hoisted form tests bit 0 of X << Y.
define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_lowestbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testb $1, %al
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i16_lowestbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testb $1, %al
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i16_lowestbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testb $1, %dil
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i16_lowestbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testb $1, %al
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i16 1, %y ; constant is the lowest bit (0x0001)
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}
173
; i16 middle bits: (X & (0x0FF0 l>> Y)) == 0, a byte-wide run of set bits
; centered in the value.
define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testl $4080, %eax # imm = 0xFF0
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $4080, %eax # imm = 0xFF0
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testl $4080, %edi # imm = 0xFF0
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $4080, %eax # imm = 0xFF0
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i16 4080, %y ; constant 0x0FF0: middle 8 bits set
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}
212
213; i32 scalar
214
; i32 sign bit: (X & (0x80000000 l>> Y)) == 0.
define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testl $-2147483648, %edi # imm = 0x80000000
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i32 2147483648, %y ; constant is the i32 sign bit (0x80000000)
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
253
; i32 lowest bit: (X & (1 l>> Y)) == 0; hoisted form tests bit 0 of X << Y.
define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_lowestbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testb $1, %al
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_lowestbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testb $1, %al
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_lowestbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testb $1, %dil
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_lowestbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testb $1, %al
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i32 1, %y ; constant is the lowest bit (0x00000001)
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
292
; i32 middle bits: (X & (0x00FFFF00 l>> Y)) == 0, a 16-bit run of set bits
; centered in the value.
define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %edi
; X64-BMI1-NEXT:    testl $16776960, %edi # imm = 0xFFFF00
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i32 16776960, %y ; constant 0x00FFFF00: middle 16 bits set
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
331
332; i64 scalar
333
; i64 sign bit: (X & (1<<63 l>> Y)) == 0. On 32-bit targets the 64-bit shift
; is expanded via shldl plus a bit-5 test of the shift amount; on 64-bit
; targets the sign-bit test folds to a plain shrq $63.
define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    pushl %esi
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI1-NEXT:    movl %eax, %esi
; X86-BMI1-NEXT:    shll %cl, %esi
; X86-BMI1-NEXT:    shldl %cl, %eax, %edx
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovnel %esi, %edx
; X86-BMI1-NEXT:    testl $-2147483648, %edx # imm = 0x80000000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    popl %esi
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT:    shldl %cl, %eax, %edx
; X86-BMI2-NEXT:    shlxl %ecx, %eax, %eax
; X86-BMI2-NEXT:    testb $32, %cl
; X86-BMI2-NEXT:    cmovel %edx, %eax
; X86-BMI2-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i64_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movq %rsi, %rcx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT:    shlq %cl, %rdi
; X64-BMI1-NEXT:    shrq $63, %rdi
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i64_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
; X64-BMI2-NEXT:    shrq $63, %rax
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i64 9223372036854775808, %y ; constant is the i64 sign bit (1 << 63)
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}
384
; i64 lowest bit: (X & (1 l>> Y)) == 0. On 32-bit targets only the low half
; matters, selected against zero when the shift amount is >= 32.
define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    xorl %edx, %edx
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovel %eax, %edx
; X86-BMI1-NEXT:    testb $1, %dl
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    xorl %edx, %edx
; X86-BMI2-NEXT:    testb $32, %al
; X86-BMI2-NEXT:    cmovel %ecx, %edx
; X86-BMI2-NEXT:    testb $1, %dl
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i64_lowestbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movq %rsi, %rcx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT:    shlq %cl, %rdi
; X64-BMI1-NEXT:    testb $1, %dil
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i64_lowestbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
; X64-BMI2-NEXT:    testb $1, %al
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i64 1, %y ; constant is the lowest bit
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}
429
; i64 middle bits: (X & (0x0000FFFFFFFF0000 l>> Y)) == 0, a 32-bit run of set
; bits spanning the half-word boundary. 32-bit targets combine both halves
; before the compare; 64-bit targets need movabsq for the wide immediate.
define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    pushl %esi
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI1-NEXT:    movl %eax, %esi
; X86-BMI1-NEXT:    shll %cl, %esi
; X86-BMI1-NEXT:    shldl %cl, %eax, %edx
; X86-BMI1-NEXT:    xorl %eax, %eax
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovnel %esi, %edx
; X86-BMI1-NEXT:    movzwl %dx, %ecx
; X86-BMI1-NEXT:    cmovel %esi, %eax
; X86-BMI1-NEXT:    andl $-65536, %eax # imm = 0xFFFF0000
; X86-BMI1-NEXT:    orl %ecx, %eax
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    popl %esi
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    pushl %esi
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT:    shldl %cl, %eax, %edx
; X86-BMI2-NEXT:    shlxl %ecx, %eax, %eax
; X86-BMI2-NEXT:    xorl %esi, %esi
; X86-BMI2-NEXT:    testb $32, %cl
; X86-BMI2-NEXT:    cmovnel %eax, %edx
; X86-BMI2-NEXT:    movzwl %dx, %ecx
; X86-BMI2-NEXT:    cmovel %eax, %esi
; X86-BMI2-NEXT:    andl $-65536, %esi # imm = 0xFFFF0000
; X86-BMI2-NEXT:    orl %ecx, %esi
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    popl %esi
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movq %rsi, %rcx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT:    shlq %cl, %rdi
; X64-BMI1-NEXT:    movabsq $281474976645120, %rax # imm = 0xFFFFFFFF0000
; X64-BMI1-NEXT:    testq %rax, %rdi
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
; X64-BMI2-NEXT:    movabsq $281474976645120, %rcx # imm = 0xFFFFFFFF0000
; X64-BMI2-NEXT:    testq %rcx, %rax
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i64 281474976645120, %y ; constant 0x0000FFFFFFFF0000: middle 32 bits set
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}
492
493;------------------------------------------------------------------------------;
494; A few trivial vector tests
495;------------------------------------------------------------------------------;
496
; Vector splat: (X & (<1,1,1,1> l>> Y)) == 0 per lane. The hoisted form shifts
; X left per lane (variable shift emulated on SSE2 via pslld/cvttps2dq/pmuludq;
; native vpsllvd on AVX2) and masks with the splat constant.
define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_splat_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pxor %xmm2, %xmm2
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT:    pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT:    pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_splat_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_splat_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pxor %xmm2, %xmm2
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X64-SSE2-NEXT:    pmuludq %xmm1, %xmm0
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT:    pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y ; splat lowest-bit constant
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}
545
; Non-splat vector constant <0,1,0xFFFF00,0x80000000>: the hoist is not done,
; so the constant itself is shifted right per lane (psrld expansion on SSE2,
; vpsrlvd on AVX2) before masking X.
define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: vec_4xi32_nonsplat_eq:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,16776960,2147483648]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    psrld %xmm2, %xmm5
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    psrld %xmm1, %xmm3
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
; SSE2-NEXT:    andps %xmm5, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: vec_4xi32_nonsplat_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
  %t0 = lshr <4 x i32> <i32 0, i32 1, i32 16776960, i32 2147483648>, %y ; different constant per lane
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}
583
; Splat-with-undef lane in the shifted constant: still treated as a splat of 1,
; so the hoist happens just like in vec_4xi32_splat_eq.
define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pxor %xmm2, %xmm2
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT:    pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT:    pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pxor %xmm2, %xmm2
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X64-SSE2-NEXT:    pmuludq %xmm1, %xmm0
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT:    pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y ; undef in the shifted constant
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}
; Undef lane in the compared-against zero vector (not in the shifted constant):
; the hoist is blocked here, so the constant is shifted right per lane instead.
define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    psrld %xmm2, %xmm5
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    psrld %xmm1, %xmm3
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
; SSE2-NEXT:    andps %xmm5, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
  %t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0> ; undef in the zero vector
  ret <4 x i1> %res
}
; Undef lanes in BOTH the shifted constant and the zero vector; again the
; hoist is blocked and the constant is shifted right per lane.
define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = <1,1,u,1>
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    psrld %xmm2, %xmm5
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psrld %xmm2, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT:    psrld %xmm1, %xmm3
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,3]
; SSE2-NEXT:    andps %xmm5, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
  %t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y ; undef in shifted constant
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0> ; undef in zero vector too
  ret <4 x i1> %res
}
706
707;------------------------------------------------------------------------------;
; A few special tests
709;------------------------------------------------------------------------------;
710
; Same sign-bit pattern but with the 'ne' predicate; both predicates support
; the hoist, and the sign-bit test folds to a plain right shift by 7.
define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_ne:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    shrb $7, %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_ne:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %al
; X64-NEXT:    shrb $7, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %t0 = lshr i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp ne i8 %t1, 0 ;  we are perfectly happy with 'ne' predicate
  ret i1 %res
}
734
735;------------------------------------------------------------------------------;
736; What if X is a constant too?
737;------------------------------------------------------------------------------;
738
; X constant, mask 1: (constant l>> Y) & 1 == 0 becomes a direct bit test (bt)
; of bit Y in the constant.
define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
; X86-LABEL: scalar_i32_x_is_const_eq:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $-1437226411, %ecx # imm = 0xAA55AA55
; X86-NEXT:    btl %eax, %ecx
; X86-NEXT:    setae %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i32_x_is_const_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-NEXT:    btl %edi, %eax
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = lshr i32 2857740885, %y ; 0xAA55AA55 bit pattern
  %t1 = and i32 %t0, 1
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
; X constant, shifted value 1: (1 l>> Y) & 0xAA55AA55 == 0. Since 1 l>> Y is
; either 1 or 0 and bit 0 of the mask is set, this reduces to testing the
; shifted value directly.
define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_x_is_const2_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-BMI1-NEXT:    movl $1, %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    testl %eax, %eax
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_x_is_const2_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-BMI2-NEXT:    movl $1, %ecx
; X86-BMI2-NEXT:    shrxl %eax, %ecx, %eax
; X86-BMI2-NEXT:    testl %eax, %eax
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_x_is_const2_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %edi, %ecx
; X64-BMI1-NEXT:    movl $1, %eax
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shrl %cl, %eax
; X64-BMI1-NEXT:    testl %eax, %eax
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_x_is_const2_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    movl $1, %eax
; X64-BMI2-NEXT:    shrxl %edi, %eax, %eax
; X64-BMI2-NEXT:    testl %eax, %eax
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = lshr i32 1, %y
  %t1 = and i32 %t0, 2857740885 ; 0xAA55AA55; bit 0 is set
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
800
801;------------------------------------------------------------------------------;
802; A few negative tests
803;------------------------------------------------------------------------------;
804
; Negative test: 'slt 0' checks the sign bit, but (X & (24 l>> Y)) can never
; have the i8 sign bit set, so the whole comparison folds to false.
define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: negative_scalar_i8_bitsinmiddle_slt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    ret{{[l|q]}}
  %t0 = lshr i8 24, %y
  %t1 = and i8 %t0, %x
  %res = icmp slt i8 %t1, 0 ; signed predicate, not eq/ne with zero
  ret i1 %res
}
815
; Negative test: comparing against 1 rather than 0, so the hoist does not
; apply and the constant itself is shifted right before the and/compare.
define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X86:       # %bb.0:
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movb $-128, %al
; X86-NEXT:    shrb %cl, %al
; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
; X86-NEXT:    cmpb $1, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movb $-128, %al
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrb %cl, %al
; X64-NEXT:    andb %dil, %al
; X64-NEXT:    cmpb $1, %al
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = lshr i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 1 ; should be comparing with 0
  ret i1 %res
}
842