; RUN: opt < %s -sroa -S | FileCheck %s
; RUN: opt -debugify -sroa -S < %s | FileCheck %s -check-prefix DEBUGLOC
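;
; Tests that SROA computes and preserves correct alignments on the loads and
; stores it creates when splitting and promoting allocas.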

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
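; Test that the align attributes on the memcpy operands are propagated to the
; rewritten loads and stores: align 16 at offset 0 and align 1 at offset 1.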
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0

  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %gep_alloca, i8* align 16 %gep_a, i32 2, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %gep_b, i8* align 16 %gep_alloca, i32 2, i1 false)
  ret void
}

define void @test2() {
; CHECK-LABEL: @test2(
; CHECK: alloca i16
; CHECK: load i8, i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void

; Check that when sroa rewrites the alloca partition
; it preserves the original DebugLocation.
; DEBUGLOC-LABEL: @test2(
; DEBUGLOC: {{.*}} = alloca {{.*}} !dbg ![[DbgLoc:[0-9]+]]
; DEBUGLOC-LABEL: }
;
; DEBUGLOC: ![[DbgLoc]] = !DILocation(line: 9,

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2      ; "line 9" to -debugify
  %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8, i8* %gep2
  store i8 42, i8* %gep2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %aaptr, i8* align 2 %aptr, i32 16, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %bptr, i8* align 2 %aaptr, i32 16, i1 false)
  ret void
}

define void @test3(i8* %x) {
; Test that when we promote an alloca to a type with lower ABI alignment, we
; provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2
; CHECK: ret void

entry:
  %a = alloca { i8*, i8*, i8* }
  %b = alloca { i8*, i8*, i8* }
  %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %a_raw, i8* align 8 %x, i32 22, i1 false)
  %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  %b_gep = getelementptr i8, i8* %b_raw, i32 6
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %b_gep, i8* align 2 %x, i32 18, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting. The use
; of volatile in this test case is just to force the loads and stores to not be
; split or promoted out of existence.
;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: load double, double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca [18 x i8]
  %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
  %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  %weird_load1 = load volatile i16, i16* %weird_cast1, align 1

  %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
  %ptr2 = bitcast i8* %raw2 to double*
  %d1 = load double, double* %ptr1, align 1
  store volatile double %d1, double* %ptr2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
  %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  %weird_load2 = load volatile i16, i16* %weird_cast2, align 1

  ret void
}

define void @test6() {
; We should set the alignment on all load and store operations; make sure
; we choose an appropriate alignment.
; CHECK-LABEL: @test6(
; CHECK: alloca double, align 8{{$}}
; CHECK: alloca double, align 8{{$}}
; CHECK: store{{.*}}, align 8
; CHECK: load{{.*}}, align 8
; CHECK: store{{.*}}, align 8
; CHECK-NOT: align
; CHECK: ret void

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1

  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
  %val = load double, double* %ptr1, align 1
  store volatile double %val, double* %ptr2, align 1

  ret void
}

define void @test7(i8* %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores.
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i1 false)
; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1

  %val1 = load double, double* %ptr2, align 1
  %val2 = load double, double* %ptr1, align 1

  store double %val1, double* %ptr1, align 1
  store double %val2, double* %ptr2, align 1

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i1 false)
; CHECK: store double %[[val1]], double* %{{.*}}, align 1
; CHECK: store double %[[val2]], double* %{{.*}}, align 1

  ret void
; CHECK: ret void
}

define void @test8() {
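; Test that splitting a load of an alloca with alignment 1 produces element
; loads that also have alignment 1.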
; CHECK-LABEL: @test8(
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1
; CHECK: load i32, {{.*}}, align 1

  %ptr = alloca [5 x i32], align 1
  %ptr.8 = bitcast [5 x i32]* %ptr to i8*
  call void @populate(i8* %ptr.8)
  %val = load [5 x i32], [5 x i32]* %ptr, align 1
  ret void
}

define void @test9() {
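; Test that splitting a load of an alloca with alignment 8 computes each
; element's alignment from its offset (8, 4, 8, 4, 8).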
; CHECK-LABEL: @test9(
; CHECK: load i32, {{.*}}, align 8
; CHECK: load i32, {{.*}}, align 4
; CHECK: load i32, {{.*}}, align 8
; CHECK: load i32, {{.*}}, align 4
; CHECK: load i32, {{.*}}, align 8

  %ptr = alloca [5 x i32], align 8
  %ptr.8 = bitcast [5 x i32]* %ptr to i8*
  call void @populate(i8* %ptr.8)
  %val = load [5 x i32], [5 x i32]* %ptr, align 8
  ret void
}

define void @test10() {
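; Test that splitting a load of a struct alloca with alignment 2 clamps each
; element's alignment to at most 2 based on its offset.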
; CHECK-LABEL: @test10(
; CHECK: load i32, {{.*}}, align 2
; CHECK: load i8, {{.*}}, align 2
; CHECK: load i8, {{.*}}, align 1
; CHECK: load i8, {{.*}}, align 2
; CHECK: load i16, {{.*}}, align 2

  %ptr = alloca {i32, i8, i8, {i8, i16}}, align 2
  %ptr.8 = bitcast {i32, i8, i8, {i8, i16}}* %ptr to i8*
  call void @populate(i8* %ptr.8)
  %val = load {i32, i8, i8, {i8, i16}}, {i32, i8, i8, {i8, i16}}* %ptr, align 2
  ret void
}

%struct = type { i32, i32 }
define dso_local i32 @pr45010(%struct* %A) {
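; Test that the rewritten atomic volatile load keeps its alignment of 4.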
; CHECK-LABEL: @pr45010
; CHECK: load atomic volatile i32, {{.*}}, align 4

  %B = alloca %struct, align 4
  %A.i = getelementptr inbounds %struct, %struct* %A, i32 0, i32 0
  %B.i = getelementptr inbounds %struct, %struct* %B, i32 0, i32 0
  %1 = load i32, i32* %A.i, align 4
  store atomic volatile i32 %1, i32* %B.i release, align 4
  %2 = bitcast %struct* %B to i32*
  %x = load atomic volatile i32, i32* %2 acquire, align 4
  ret i32 %x
}

declare void @populate(i8*)