; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s
; rdar://10232252
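;
; Check that immediate and register offsets are folded into the addressing
; modes of AArch64 loads where the encoding allows it, and are materialized
; with separate arithmetic or mov/movk sequences where it does not.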

@object = external hidden global i64, section "__DATA, __objc_ivar", align 8

; base + offset (imm9)
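; An index of 1 scales to a byte offset of 8, which is encoded directly in
; the load.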
; CHECK: @t1
; CHECK: ldr xzr, [x0, #8]
; CHECK: ret
define void @t1(i64* %object) {
  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 1
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + offset (> imm9)
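; -33 * 8 = -264 is below the signed 9-bit unscaled range [-256, 255], so the
; offset is applied with a separate sub.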
; CHECK: @t2
; CHECK: sub [[ADDREG:x[0-9]+]], x0, #264
; CHECK: ldr xzr, [
; CHECK: ret
define void @t2(i64* %object) {
  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 -33
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
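; 4095 * 8 = 32760 is the largest scaled offset that still fits the unsigned
; 12-bit immediate form of a 64-bit load.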
; CHECK: @t3
; CHECK: ldr xzr, [x0, #32760]
; CHECK: ret
define void @t3(i64* %object) {
  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4095
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + unsigned offset (> imm12 * size of type in bytes)
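; 4096 * 8 = 32768 no longer fits the scaled 12-bit immediate, so the offset
; is moved into a register and used as a register offset.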
; CHECK: @t4
; CHECK: mov w[[NUM:[0-9]+]], #32768
; CHECK: ldr xzr, [x0, x[[NUM]]]
; CHECK: ret
define void @t4(i64* %object) {
  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4096
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + reg
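; The index register is folded into the load with a left shift of 3 to scale
; by the 8-byte element size.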
; CHECK: @t5
; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
; CHECK: ret
define void @t5(i64 %a) {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + reg + imm
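; The register part is folded into an add; the remaining 32768 is too large
; for an immediate offset and goes through a register, as in @t4.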
; CHECK: @t6
; CHECK: add [[ADDREG:x[0-9]+]], x1, x0, lsl #3
; CHECK-NEXT: mov w[[NUM:[0-9]+]], #32768
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
define void @t6(i64 %a, i64* %object) {
  %tmp1 = getelementptr inbounds i64, i64* %object, i64 %a
  %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; Test base + wide immediate
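; Wide immediates that a single mov can materialize are kept in a register
; and used as a register offset.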
define void @t7(i64 %a) {
; CHECK-LABEL: t7:
; CHECK: mov w[[NUM:[0-9]+]], #65535
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
  %1 = add i64 %a, 65535   ;0xffff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

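; -4662 = 0xffffffffffffedca is the bitwise NOT of 4661, so a single movn
; can produce it.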
define void @t8(i64 %a) {
; CHECK-LABEL: t8:
; CHECK: mov [[REG:x[0-9]+]], #-4662
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = sub i64 %a, 4662   ;-4662 is 0xffffffffffffedca
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

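; -305463297 = 0xffffffffedcaffff is the bitwise NOT of 0x1235 << 16, again
; reachable with a single movn.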
define void @t9(i64 %a) {
; CHECK-LABEL: t9:
; CHECK: mov [[REG:x[0-9]+]], #-305463297
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = add i64 -305463297, %a   ;-305463297 is 0xffffffffedcaffff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

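; 81909218222800896 = 0x123 << 48 is a single movz.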
define void @t10(i64 %a) {
; CHECK-LABEL: t10:
; CHECK: mov [[REG:x[0-9]+]], #81909218222800896
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = add i64 %a, 81909218222800896   ;0x123000000000000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

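; 0x1234567 has two non-zero 16-bit chunks (0x123 and 0x4567), so it takes a
; mov plus a movk.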
define void @t11(i64 %a) {
; CHECK-LABEL: t11:
; CHECK: mov w[[NUM:[0-9]+]], #17767
; CHECK: movk w[[NUM:[0-9]+]], #291
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
  %1 = add i64 %a, 19088743   ;0x1234567
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

; Test some boundaries that should not use movz/movn/orr
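; Offsets that fit an add/sub immediate (12 bits, optionally shifted left by
; 12) are folded into address arithmetic instead.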
define void @t12(i64 %a) {
; CHECK-LABEL: t12:
; CHECK: add [[REG:x[0-9]+]], x0, #4095
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, 4095   ;0xfff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t13(i64 %a) {
; CHECK-LABEL: t13:
; CHECK: sub [[REG:x[0-9]+]], x0, #4095
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, -4095   ;-0xfff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

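; 0x123000 = 0x123 << 12 matches the shifted form of the add immediate.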
define void @t14(i64 %a) {
; CHECK-LABEL: t14:
; CHECK: add [[REG:x[0-9]+]], x0, #291, lsl #12
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, 1191936   ;0x123000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t15(i64 %a) {
; CHECK-LABEL: t15:
; CHECK: sub [[REG:x[0-9]+]], x0, #291, lsl #12
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, -1191936   ;0xFFFFFFFFFFEDD000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

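; 28672 = 0x7000 is 8-byte aligned and within the scaled unsigned 12-bit
; range, so it folds directly into the load.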
define void @t16(i64 %a) {
; CHECK-LABEL: t16:
; CHECK: ldr xzr, [x0, #28672]
  %1 = add i64 %a, 28672   ;0x7000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

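; -256 is the bottom of the signed 9-bit unscaled range, so an ldur handles
; it directly.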
define void @t17(i64 %a) {
; CHECK-LABEL: t17:
; CHECK: ldur xzr, [x0, #-256]
  %1 = add i64 %a, -256   ;-0x100
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}