; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s

; Check that we recognize this idiom for rotation too:
;    a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))
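; For reference, a C-level sketch of the left-rotate idiom (hypothetical
; helper name rotl32; with unsigned operands the masks keep the shift counts
; in range, so the pattern is well-defined even for b == 0):
;   uint32_t rotl32(uint32_t a, uint32_t b) {
;     return (a << (b & 31)) | (a >> ((0 - b) & 31));
;   }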

define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
; CHECK-NOT: and
; CHECK: roll
entry:
  %and = and i32 %b, 31
  %shl = shl i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shr = lshr i32 %a, %and3
  %or = or i32 %shl, %shr
  ret i32 %or
}

define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
; CHECK-NOT: and
; CHECK: rorl
entry:
  %and = and i32 %b, 31
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl
  ret i32 %or
}

define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
; CHECK-NOT: and
; CHECK: rolq
entry:
  %and = and i64 %b, 63
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr
  ret i64 %or
}

define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
; CHECK-NOT: and
; CHECK: rorq
entry:
  %and = and i64 %b, 63
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl
  ret i64 %or
}

; Also check that the idiom is matched when the rotate operates directly on
; a memory operand.
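; With the load and store folded, the expected codegen for the 32-bit case
; is roughly the following (a sketch, assuming the SysV calling convention
; used on Darwin: %pa in %rdi, %b in %esi):
;   movl %esi, %ecx
;   roll %cl, (%rdi)
;   retq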

define void @rotate_left_m32(i32* %pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
; CHECK-NOT: and
; CHECK: roll
; The store is folded into the rotate, so no mov follows it:
; CHECK-NOT: mov
entry:
  %a = load i32, i32* %pa, align 16
  %and = and i32 %b, 31
  %shl = shl i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shr = lshr i32 %a, %and3
  %or = or i32 %shl, %shr
  store i32 %or, i32* %pa, align 32
  ret void
}

define void @rotate_right_m32(i32* %pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
; CHECK-NOT: and
; CHECK: rorl
; The store is folded into the rotate, so no mov follows it:
; CHECK-NOT: mov
entry:
  %a = load i32, i32* %pa, align 16
  %and = and i32 %b, 31
  %shr = lshr i32 %a, %and
  %0 = sub i32 0, %b
  %and3 = and i32 %0, 31
  %shl = shl i32 %a, %and3
  %or = or i32 %shr, %shl
  store i32 %or, i32* %pa, align 32
  ret void
}

define void @rotate_left_m64(i64* %pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
; CHECK-NOT: and
; CHECK: rolq
; The store is folded into the rotate, so no mov follows it:
; CHECK-NOT: mov
entry:
  %a = load i64, i64* %pa, align 16
  %and = and i64 %b, 63
  %shl = shl i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shr = lshr i64 %a, %and3
  %or = or i64 %shl, %shr
  store i64 %or, i64* %pa, align 64
  ret void
}

define void @rotate_right_m64(i64* %pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
; CHECK-NOT: and
; CHECK: rorq
; The store is folded into the rotate, so no mov follows it:
; CHECK-NOT: mov
entry:
  %a = load i64, i64* %pa, align 16
  %and = and i64 %b, 63
  %shr = lshr i64 %a, %and
  %0 = sub i64 0, %b
  %and3 = and i64 %0, 63
  %shl = shl i64 %a, %and3
  %or = or i64 %shr, %shl
  store i64 %or, i64* %pa, align 64
  ret void
}