; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64 -check-prefix=X64
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=X86-64 -check-prefix=X32
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mattr=-bmi -mtriple=i686-- | FileCheck %s -check-prefix=X86-32

; Use h registers. On x86-64, codegen doesn't support general allocation
; of h registers yet, due to x86 encoding complications.
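;
; The complication (an illustrative note, not a CHECK line): once an
; instruction carries a REX prefix, the high-byte registers %ah, %bh,
; %ch and %dh become unencodable, because the same encodings are used
; for %spl, %bpl, %sil and %dil instead. So something like
;   movb %ah, %sil
; has no valid encoding, which is why the 64-bit targets below go
; through shrq/shrl and the low byte rather than reading the h
; register directly.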

define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
; X64-LABEL: bar64:
; X64:       # %bb.0:
; X64-NEXT:    shrq $8, %rdi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar64:
; X32:       # %bb.0:
; X32-NEXT:    shrq $8, %rdi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    shrq $8, %rcx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar64:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%ecx)
; X86-32-NEXT:    retl

; See FIXME: on regclass GR8.
; It could be optimally transformed to: incb %ch; movb %ch, (%rdx)
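;
; A sketch of that transform (not generated output): on Win64 the byte
; being stored is the second byte of %rcx, so instead of
;   shrq $8, %rcx
;   incb %cl
;   movb %cl, (%rdx)
; the value could be read through the high-byte register directly:
;   incb %ch
;   movb %ch, (%rdx)
; saving the shift. The GR8 regclass FIXME tracks why this isn't done yet.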

  %t0 = lshr i64 %x, 8
  %t1 = trunc i64 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
; X64-LABEL: bar32:
; X64:       # %bb.0:
; X64-NEXT:    shrl $8, %edi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar32:
; X32:       # %bb.0:
; X32-NEXT:    shrl $8, %edi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    shrl $8, %ecx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar32:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%edx)
; X86-32-NEXT:    retl

  %t0 = lshr i32 %x, 8
  %t1 = trunc i32 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
; X64-LABEL: bar16:
; X64:       # %bb.0:
; X64-NEXT:    shrl $8, %edi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar16:
; X32:       # %bb.0:
; X32-NEXT:    shrl $8, %edi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar16:
; WIN64:       # %bb.0:
; WIN64-NEXT:    # kill: def $cx killed $cx def $ecx
; WIN64-NEXT:    shrl $8, %ecx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar16:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%edx)
; X86-32-NEXT:    retl

  %t0 = lshr i16 %x, 8
  %t1 = trunc i16 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, i8* %p
  ret void
}

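; The qux tests check that byte extracts such as (x >> 8) & 255 are
; selected as a single movzbl from an h register where the operand
; allows it. The extra movq/movl copy on x86-64 is needed because the
; argument arrives in %rdi/%edi, and only %rax, %rbx, %rcx and %rdx
; have high-byte subregisters.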
define i64 @qux64(i64 inreg %x) nounwind {
; X86-64-LABEL: qux64:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movq %rdi, %rax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzbl %ch, %eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux64:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    xorl %edx, %edx
; X86-32-NEXT:    retl

  %t0 = lshr i64 %x, 8
  %t1 = and i64 %t0, 255
  ret i64 %t1
}

define i32 @qux32(i32 inreg %x) nounwind {
; X86-64-LABEL: qux32:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movl %edi, %eax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzbl %ch, %eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux32:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    retl

  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  ret i32 %t1
}

define i16 @qux16(i16 inreg %x) nounwind {
; X86-64-LABEL: qux16:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movl %edi, %eax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux16:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzwl %cx, %eax
; WIN64-NEXT:    shrl $8, %eax
; WIN64-NEXT:    # kill: def $ax killed $ax killed $eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux16:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-32-NEXT:    retl

  %t0 = lshr i16 %x, 8
  ret i16 %t0
}