; RUN: opt -tbaa -basic-aa -licm -S < %s | FileCheck %s
; RUN: opt -aa-pipeline=tbaa,basic-aa -passes='require<aa>,require<targetir>,require<scalar-evolution>,require<opt-remark-emit>,loop(licm)' -S %s | FileCheck %s

; If we can prove a local is thread local, we can insert stores during
; promotion which wouldn't be legal otherwise.

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-linux-generic"

; Escape hatch used by the loop bodies below: an atomic/volatile load of @p
; gives each loop a branch the optimizer cannot analyze away.
@p = external global i8*

; Freshly malloc'ed memory is noalias and not yet visible to other threads,
; which is what lets LICM promote the stores in @test.
declare i8* @malloc(i64)

; Exercise the TLS case
; CHECK-LABEL: @test
define i32* @test(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

; Promotion should hoist the load of %addr into the preheader...
for.body.lr.ph:                                   ; preds = %entry
; CHECK-LABEL: for.body.lr.ph:
; CHECK-NEXT: %addr.promoted = load i32, i32* %addr, align 4
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberate impossible to analyze branch
  %guard = load atomic i8*, i8** @p monotonic, align 8
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

; ... and sink a store onto BOTH exits, including this one that is not
; dominated by the original store. Legal only because the memory is
; provably thread-local (fresh noalias malloc).
early-exit:
; CHECK-LABEL: early-exit:
; CHECK: store i32 %new1.lcssa, i32* %addr, align 4
  ret i32* null

for.body:
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK: store i32 %new.lcssa, i32* %addr, align 4
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

; Stack allocations can also be thread-local
; CHECK-LABEL: @test2
define i32* @test2(i32 %n) {
entry:
  ; Same shape as @test, but the thread-local memory is an alloca
  ; instead of a fresh malloc.
  %mem = alloca i8, i32 16
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
; CHECK-LABEL: for.body.lr.ph:
; CHECK-NEXT: %addr.promoted = load i32, i32* %addr, align 4
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberate impossible to analyze branch
  %guard = load atomic i8*, i8** @p monotonic, align 8
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

; Store must be inserted on the early exit too (see @test).
early-exit:
; CHECK-LABEL: early-exit:
; CHECK: store i32 %new1.lcssa, i32* %addr, align 4
  ret i32* null

for.body:
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK: store i32 %new.lcssa, i32* %addr, align 4
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

; Unknown allocator: nothing marks the returned memory as thread-local,
; so promotion across the unanalyzable exit would be illegal.
declare i8* @not_malloc(i64)

; Negative test - not TLS
; CHECK-LABEL: @test_neg
define i32* @test_neg(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @not_malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, i32* %addr, align 4
  ; deliberate impossible to analyze branch
  %guard = load volatile i8*, i8** @p
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

; No store may be introduced on this path - it did not exist originally.
early-exit:
; CHECK-LABEL: early-exit:
; CHECK-NOT: store
  ret i32* null

; The original in-loop store must remain in place.
for.body:
; CHECK-LABEL: for.body:
; CHECK: store i32 %new, i32* %addr, align 4
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK-NOT: store
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}

; Negative test - can't speculate load since branch
; may control alignment
; CHECK-LABEL: @test_neg2
define i32* @test_neg2(i32 %n) {
entry:
  ;; ignore the required null check for simplicity
  %mem = call dereferenceable(16) noalias i8* @malloc(i64 16)
  %addr = bitcast i8* %mem to i32*
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

; Unlike @test, the load sits in %for.body (guarded by the branch),
; not in the header, so it cannot be hoisted and promotion must not fire.
for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  ; deliberate impossible to analyze branch
  %guard = load volatile i8*, i8** @p
  %exitcmp = icmp eq i8* %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
; CHECK-LABEL: early-exit:
; CHECK-NOT: store
  ret i32* null

; Load and store must both stay inside the loop body.
for.body:
; CHECK-LABEL: for.body:
; CHECK: store i32 %new, i32* %addr, align 4
  %old = load i32, i32* %addr, align 4
  %new = add i32 %old, 1
  store i32 %new, i32* %addr, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
; CHECK-LABEL: for.cond.for.end_crit_edge:
; CHECK-NOT: store
  %split = phi i32* [ %addr, %for.body ]
  ret i32* null
}