; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

; *Please* keep in sync with test/CodeGen/X86/extract-lowbits.ll

; https://bugs.llvm.org/show_bug.cgi?id=36419
; https://bugs.llvm.org/show_bug.cgi?id=37603
; https://bugs.llvm.org/show_bug.cgi?id=37610

; Patterns:
;   a) x &  (1 << nbits) - 1
;   b) x & ~(-1 << nbits)
;   c) x &  (-1 >> (32 - nbits))
;   d) x << (32 - nbits) >> (32 - nbits)
; are equivalent; an illustrative sketch follows below.

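; Illustrative only; a minimal sketch, not exercised by FileCheck (it has no
; CHECK lines), and the function name is made up for this example. It computes
; all four patterns side by side and reports whether they agree; for
; 0 < %nbits < 32, each of %a/%b/%c/%d is %x with only its low %nbits bits
; kept.
define i1 @bzhi32_patterns_agree_sketch(i32 %x, i32 %nbits) nounwind {
  ; a) x & ((1 << nbits) - 1)
  %a0 = shl i32 1, %nbits
  %a1 = add i32 %a0, -1
  %a = and i32 %x, %a1
  ; b) x & ~(-1 << nbits)
  %b0 = shl i32 -1, %nbits
  %b1 = xor i32 %b0, -1
  %b = and i32 %x, %b1
  ; c) x & (-1 >> (32 - nbits))
  %n = sub i32 32, %nbits
  %c0 = lshr i32 -1, %n
  %c = and i32 %x, %c0
  ; d) (x << (32 - nbits)) >> (32 - nbits)
  %d0 = shl i32 %x, %n
  %d = lshr i32 %d0, %n
  ; all four results are equal for any in-range %nbits
  %ab = icmp eq i32 %a, %b
  %cd = icmp eq i32 %c, %d
  %abcd = and i1 %ab, %cd
  ret i1 %abcd
}
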
; ---------------------------------------------------------------------------- ;
; Pattern a. 32-bit
; ---------------------------------------------------------------------------- ;

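; Naming scheme used throughout this file: the *0 variant is the plain
; pattern, *1_indexzext takes the bit count as a zero-extended i8,
; *2_load/*3_load_indexzext take the value from memory, and, where present,
; *4_commutative swaps the 'and' operands.
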
define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    sub w9, w9, #1 // =1
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %conv = zext i8 %numlowbits to i32
  %onebit = shl i32 1, %conv
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    sub w8, w8, #1 // =1
; CHECK-NEXT:    and w0, w0, w8
; CHECK-NEXT:    ret
  %onebit = shl i32 1, %numlowbits
  %mask = add nsw i32 %onebit, -1
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

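; The '// kill' lines below are comment-only liveness annotations (no
; instruction is emitted for them): the zero-extended i8 count arrives in w1,
; and the 64-bit shift reads all of x1, so the printer records that defining
; w1 here also defines x1.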
define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    sub x9, x9, #1 // =1
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %conv = zext i8 %numlowbits to i64
  %onebit = shl i64 1, %conv
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_a4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    sub x8, x8, #1 // =1
; CHECK-NEXT:    and x0, x0, x8
; CHECK-NEXT:    ret
  %onebit = shl i64 1, %numlowbits
  %mask = add nsw i64 %onebit, -1
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}

; ---------------------------------------------------------------------------- ;
; Pattern b. 32-bit
; ---------------------------------------------------------------------------- ;

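; For pattern b the inverted mask never materializes: AArch64 has an
; and-with-complement instruction, so 'x & ~(-1 << nbits)' folds to a single
; 'bic', as the CHECK lines below show.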
define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsl w9, w9, w1
; CHECK-NEXT:    bic w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %conv = zext i8 %numlowbits to i32
  %notmask = shl i32 -1, %conv
  %mask = xor i32 %notmask, -1
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    lsl w8, w8, w1
; CHECK-NEXT:    bic w0, w0, w8
; CHECK-NEXT:    ret
  %notmask = shl i32 -1, %numlowbits
  %mask = xor i32 %notmask, -1
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x9, x9, x1
; CHECK-NEXT:    bic x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %conv = zext i8 %numlowbits to i64
  %notmask = shl i64 -1, %conv
  %mask = xor i64 %notmask, -1
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_b4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-1
; CHECK-NEXT:    lsl x8, x8, x1
; CHECK-NEXT:    bic x0, x0, x8
; CHECK-NEXT:    ret
  %notmask = shl i64 -1, %numlowbits
  %mask = xor i64 %notmask, -1
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}

; ---------------------------------------------------------------------------- ;
; Pattern c. 32-bit
; ---------------------------------------------------------------------------- ;

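; In the non-zext variants of pattern c, '32 - nbits' (or '64 - nbits') is
; computed with a plain 'neg': AArch64 variable shifts use the amount modulo
; the register width, so '-nbits' and 'width - nbits' select the same shift.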
define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w8, w0
; CHECK-NEXT:    ret
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    mov w10, #-1
; CHECK-NEXT:    lsr w9, w10, w9
; CHECK-NEXT:    and w0, w9, w8
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %mask = lshr i32 -1, %sh_prom
  %masked = and i32 %mask, %val
  ret i32 %masked
}

define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    lsr w8, w9, w8
; CHECK-NEXT:    and w0, w0, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %val, %mask ; swapped order
  ret i32 %masked
}

; 64-bit

define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #64
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x8, x0
; CHECK-NEXT:    ret
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    mov x10, #-1
; CHECK-NEXT:    lsr x9, x10, x9
; CHECK-NEXT:    and x0, x9, x8
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %mask = lshr i64 -1, %sh_prom
  %masked = and i64 %mask, %val
  ret i64 %masked
}

define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_c4_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    mov x9, #-1
; CHECK-NEXT:    lsr x8, x9, x8
; CHECK-NEXT:    and x0, x0, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %mask = lshr i64 -1, %numhighbits
  %masked = and i64 %val, %mask ; swapped order
  ret i64 %masked
}

; ---------------------------------------------------------------------------- ;
; Pattern d. 32-bit.
; ---------------------------------------------------------------------------- ;

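; Pattern d clears the high bits with a shift pair instead of a mask; the
; same 'neg' trick as in pattern c supplies the shift amount, so no mask
; constant needs to be materialized at all.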
define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    lsl w9, w0, w8
; CHECK-NEXT:    lsr w0, w9, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %val, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    lsl w9, w0, w8
; CHECK-NEXT:    lsr w0, w9, w8
; CHECK-NEXT:    ret
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %val, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    neg w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i32 32, %numlowbits
  %highbitscleared = shl i32 %val, %numhighbits
  %masked = lshr i32 %highbitscleared, %numhighbits
  ret i32 %masked
}

define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi32_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w9, #32
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    lsl w8, w8, w9
; CHECK-NEXT:    lsr w0, w8, w9
; CHECK-NEXT:    ret
  %val = load i32, i32* %w
  %numhighbits = sub i8 32, %numlowbits
  %sh_prom = zext i8 %numhighbits to i32
  %highbitscleared = shl i32 %val, %sh_prom
  %masked = lshr i32 %highbitscleared, %sh_prom
  ret i32 %masked
}

; 64-bit.

define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    lsl x9, x0, x8
; CHECK-NEXT:    lsr x0, x9, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %val, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d1_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #64
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    lsl x9, x0, x8
; CHECK-NEXT:    lsr x0, x9, x8
; CHECK-NEXT:    ret
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %val, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d2_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    neg x9, x1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i64 64, %numlowbits
  %highbitscleared = shl i64 %val, %numhighbits
  %masked = lshr i64 %highbitscleared, %numhighbits
  ret i64 %masked
}

define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
; CHECK-LABEL: bzhi64_d3_load_indexzext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov w9, #64
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    lsl x8, x8, x9
; CHECK-NEXT:    lsr x0, x8, x9
; CHECK-NEXT:    ret
  %val = load i64, i64* %w
  %numhighbits = sub i8 64, %numlowbits
  %sh_prom = zext i8 %numhighbits to i64
  %highbitscleared = shl i64 %val, %sh_prom
  %masked = lshr i64 %highbitscleared, %sh_prom
  ret i64 %masked
}

; ---------------------------------------------------------------------------- ;
; Constant mask
; ---------------------------------------------------------------------------- ;

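; All of these masks are contiguous runs of set bits, which AArch64 logical
; immediates can encode directly, so each case is a single 'and' with an
; immediate and no mask needs to be materialized in a register.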
; 32-bit

define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7fffffff
; CHECK-NEXT:    ret
  %masked = and i32 %val, 2147483647
  ret i32 %masked
}

define i32 @bzhi32_constant_mask32_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask32_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7fffffff
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 2147483647
  ret i32 %masked
}

define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7fff
; CHECK-NEXT:    ret
  %masked = and i32 %val, 32767
  ret i32 %masked
}

define i32 @bzhi32_constant_mask16_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask16_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7fff
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 32767
  ret i32 %masked
}

define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0x7f
; CHECK-NEXT:    ret
  %masked = and i32 %val, 127
  ret i32 %masked
}

define i32 @bzhi32_constant_mask8_load(i32* %val) nounwind {
; CHECK-LABEL: bzhi32_constant_mask8_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    and w0, w8, #0x7f
; CHECK-NEXT:    ret
  %val1 = load i32, i32* %val
  %masked = and i32 %val1, 127
  ret i32 %masked
}

; 64-bit

define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x3fffffffffffffff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 4611686018427387903
  ret i64 %masked
}

define i64 @bzhi64_constant_mask64_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask64_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x3fffffffffffffff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 4611686018427387903
  ret i64 %masked
}

define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7fffffff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 2147483647
  ret i64 %masked
}

define i64 @bzhi64_constant_mask32_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask32_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7fffffff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 2147483647
  ret i64 %masked
}

define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7fff
; CHECK-NEXT:    ret
  %masked = and i64 %val, 32767
  ret i64 %masked
}

define i64 @bzhi64_constant_mask16_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask16_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7fff
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 32767
  ret i64 %masked
}

define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x0, x0, #0x7f
; CHECK-NEXT:    ret
  %masked = and i64 %val, 127
  ret i64 %masked
}

define i64 @bzhi64_constant_mask8_load(i64* %val) nounwind {
; CHECK-LABEL: bzhi64_constant_mask8_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    and x0, x8, #0x7f
; CHECK-NEXT:    ret
  %val1 = load i64, i64* %val
  %masked = and i64 %val1, 127
  ret i64 %masked
}