; RUN: opt -basic-aa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
;
; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
; launder.invariant.group intrinsics entirely. We'll need to pay attention to
; them when/if we decide to support invariant groups.

; Global used as an unrelated clobber target in the tests below.
@g = external global i32

define i32 @foo(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4

  %1 = bitcast i32* %a to i8*
; CHECK:  3 = MemoryDef(2)
; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; This has to be MemoryUse(2), because we can't skip the barrier based on
; invariant.group.
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0
  ret i32 %2
}

define i32 @skipBarrier(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

  %1 = bitcast i32* %a to i8*
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(1)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0
  ret i32 %2
}

define i32 @skipBarrier2(i32* %a) {

; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v = load i32
  %v = load i32, i32* %a, align 4, !invariant.group !0

  %1 = bitcast i32* %a to i8*
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; We can skip the barrier only if the "skip" is not based on !invariant.group.
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %v2 = load i32
  %v2 = load i32, i32* %a32, align 4, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4

; FIXME: based on invariant.group it should be MemoryUse(liveOnEntry)
; CHECK: MemoryUse(2)
; CHECK-NEXT: %v3 = load i32
  %v3 = load i32, i32* %a32, align 4, !invariant.group !0
  %add = add nsw i32 %v2, %v3
  %add2 = add nsw i32 %add, %v
  ret i32 %add2
}

define i32 @handleInvariantGroups(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 0
  store i32 0, i32* %a, align 4, !invariant.group !0

; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: store i32 1
  store i32 1, i32* @g, align 4
  %1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
  %a32 = bitcast i8* %a8 to i32*

; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a32, align 4, !invariant.group !0

; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: store i32 2
  store i32 2, i32* @g, align 4

; FIXME: This can be changed to MemoryUse(2)
; CHECK: MemoryUse(4)
; CHECK-NEXT: %3 = load i32
  %3 = load i32, i32* %a32, align 4, !invariant.group !0
  %add = add nsw i32 %2, %3
  ret i32 %add
}

define i32 @loop(i1 %a) {
entry:
  %0 = alloca i32, align 4
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i32 4
  store i32 4, i32* %0, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber(i32* %0)
  br i1 %a, label %Loop.Body, label %Loop.End

Loop.Body:
; FIXME: MemoryUse(1)
; CHECK: MemoryUse(2)
; CHECK-NEXT: %1 = load i32
  %1 = load i32, i32* %0, !invariant.group !0
  br i1 %a, label %Loop.End, label %Loop.Body

Loop.End:
; FIXME: MemoryUse(1)
; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load
  %2 = load i32, i32* %0, align 4, !invariant.group !0
  br i1 %a, label %Ret, label %Loop.Body

Ret:
  ret i32 %2
}

define i8 @loop2(i8* %p) {
entry:
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, i8* %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(i8* %p)

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  br i1 undef, label %Loop.Body, label %Loop.End

Loop.Body:
; 5 = MemoryPhi({entry,3},{Loop.Body,4},{Loop.End,6})
; CHECK: MemoryUse(6)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, i8* %after, !invariant.group !0

; FIXME: MemoryUse(1)
; CHECK: MemoryUse(6)
; CHECK-NEXT: %1 = load i8
  %1 = load i8, i8* %p, !invariant.group !0

; CHECK: 4 = MemoryDef(6)
  store i8 4, i8* %after, !invariant.group !0

  br i1 undef, label %Loop.End, label %Loop.Body

Loop.End:
; 6 = MemoryPhi({entry,3},{Loop.Body,4})
; CHECK: MemoryUse(5)
; CHECK-NEXT: %2 = load
  %2 = load i8, i8* %after, align 4, !invariant.group !0

; FIXME: MemoryUse(1)
; CHECK: MemoryUse(5)
; CHECK-NEXT: %3 = load
  %3 = load i8, i8* %p, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body

Ret:
  ret i8 %3
}


define i8 @loop3(i8* %p) {
entry:
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, i8* %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(i8* %p)

; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  br i1 undef, label %Loop.Body, label %Loop.End

Loop.Body:
; CHECK: 8 = MemoryPhi({entry,3},{Loop.Body,4},{Loop.next,5},{Loop.End,6})
; CHECK: MemoryUse(8)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, i8* %after, !invariant.group !0

; CHECK: 4 = MemoryDef(8)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(i8* %after)

; FIXME: MemoryUse(8)
; CHECK: MemoryUse(4)
; CHECK-NEXT: %1 = load i8
  %1 = load i8, i8* %after, !invariant.group !0

  br i1 undef, label %Loop.next, label %Loop.Body
Loop.next:
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(i8* %after)

; FIXME: MemoryUse(8)
; CHECK: MemoryUse(5)
; CHECK-NEXT: %2 = load i8
  %2 = load i8, i8* %after, !invariant.group !0

  br i1 undef, label %Loop.End, label %Loop.Body

Loop.End:
; CHECK: 7 = MemoryPhi({entry,3},{Loop.next,5})
; CHECK: MemoryUse(7)
; CHECK-NEXT: %3 = load
  %3 = load i8, i8* %after, align 4, !invariant.group !0

; CHECK: 6 = MemoryDef(7)
; CHECK-NEXT: call void @clobber8
  call void @clobber8(i8* %after)

; FIXME: MemoryUse(7)
; CHECK: MemoryUse(6)
; CHECK-NEXT: %4 = load
  %4 = load i8, i8* %after, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body

Ret:
  ret i8 %3
}

define i8 @loop4(i8* %p) {
entry:
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8
  store i8 4, i8* %p, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call void @clobber
  call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  br i1 undef, label %Loop.Pre, label %Loop.End

Loop.Pre:
; CHECK: MemoryUse(2)
; CHECK-NEXT: %0 = load i8
  %0 = load i8, i8* %after, !invariant.group !0
  br label %Loop.Body
Loop.Body:
; CHECK: 6 = MemoryPhi({Loop.Pre,3},{Loop.Body,4},{Loop.End,5})
; CHECK-NEXT: MemoryUse(6)
; CHECK-NEXT: %1 = load i8
  %1 = load i8, i8* %after, !invariant.group !0

; FIXME: MemoryUse(2)
; CHECK: MemoryUse(6)
; CHECK-NEXT: %2 = load i8
  %2 = load i8, i8* %p, !invariant.group !0

; CHECK: 4 = MemoryDef(6)
  store i8 4, i8* %after, !invariant.group !0
  br i1 undef, label %Loop.End, label %Loop.Body

Loop.End:
; CHECK: 5 = MemoryPhi({entry,3},{Loop.Body,4})
; CHECK-NEXT: MemoryUse(5)
; CHECK-NEXT: %3 = load
  %3 = load i8, i8* %after, align 4, !invariant.group !0

; FIXME: MemoryUse(2)
; CHECK: MemoryUse(5)
; CHECK-NEXT: %4 = load
  %4 = load i8, i8* %p, align 4, !invariant.group !0
  br i1 undef, label %Ret, label %Loop.Body

Ret:
  ret i8 %3
}

; In the future we would like to CSE barriers if there is no clobber between.
; CHECK-LABEL: define i8 @optimizable()
define i8 @optimizable() {
entry:
  %ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, i8* %ptr, align 1, !invariant.group !0
  store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call i8* @llvm.launder.invariant.group
  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd.
; CHECK: 3 = MemoryDef(2)
; CHECK: call i8* @llvm.launder.invariant.group
  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
  call void @clobber8(i8* %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @use(i8* %ptr2)
  call void @use(i8* %ptr2)
; CHECK: 6 = MemoryDef(5)
; CHECK-NEXT: call void @use(i8* %ptr3)
  call void @use(i8* %ptr3)
; CHECK: MemoryUse(6)
; CHECK-NEXT: load i8, i8* %ptr3, {{.*}}!invariant.group
  %v = load i8, i8* %ptr3, !invariant.group !0

  ret i8 %v
}

; CHECK-LABEL: define i8 @unoptimizable2()
define i8 @unoptimizable2() {
  %ptr = alloca i8
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 42, i8* %ptr, align 1, !invariant.group !0
  store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: call i8* @llvm.launder.invariant.group
  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 3 = MemoryDef(2)
  store i8 43, i8* %ptr
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call i8* @llvm.launder.invariant.group
  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
  call void @clobber8(i8* %ptr)
; CHECK: 6 = MemoryDef(5)
; CHECK-NEXT: call void @use(i8* %ptr2)
  call void @use(i8* %ptr2)
; CHECK: 7 = MemoryDef(6)
; CHECK-NEXT: call void @use(i8* %ptr3)
  call void @use(i8* %ptr3)
; CHECK: MemoryUse(7)
; CHECK-NEXT: %v = load i8, i8* %ptr3, align 1, !invariant.group !0
  %v = load i8, i8* %ptr3, !invariant.group !0
  ret i8 %v
}


declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare void @clobber(i32*)
declare void @clobber8(i8*)
declare void @use(i8* readonly)

!0 = !{!"group1"}
