1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
4 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
7 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
8 
9 // RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
10 // RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
11 // RUN: %clang_cc1  -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
12 // RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
13 // RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
14 // RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
15 
16 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
17 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
19 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
20 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
21 // RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
22 
23 // RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
24 // RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
25 // RUN: %clang_cc1  -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
26 // RUN: %clang_cc1  -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
27 // RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
28 // RUN: %clang_cc1  -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
29 
30 // expected-no-diagnostics
31 #ifndef HEADER
32 #define HEADER
33 
34 template <class T>
35 struct S {  // minimal wrapper with user-provided special members so lastprivate must emit ctor/dtor calls
36   T f;  // wrapped value
SS37   S(T a) : f(a) {}  // converting constructor: initializes f from a
SS38   S() : f() {}  // default constructor: value-initializes f
operator TS39   operator T() { return T(); }  // conversion back to T; returns a default-constructed value
~SS40   ~S() {}  // non-trivial destructor (forces destructor calls on privatized copies)
41 };
42 
43 template <typename T>
tmain()44 T tmain() {  // template driver: same directive shape as main(), instantiated with T = int from main()
45   S<T> test;  // object with non-trivial special members
46   T t_var = T();  // scalar lastprivate list item
47   T vec[] = {1, 2};  // array lastprivate list item
48   S<T> s_arr[] = {1, 2};  // array of class type: element-wise copy on lastprivate
49   S<T> &var = test;  // reference list item: lastprivate copies through the reference
50   #pragma omp target teams distribute parallel for simd lastprivate(t_var, vec, s_arr, s_arr, var, var)  // duplicate list items are deliberate test input
51   for (int i = 0; i < 2; ++i) {
52     vec[i] = t_var;
53     s_arr[i] = var;
54   }
55   return T();
56 }
57 
main()58 int main() {  // host entry: LAMBDA build exercises lambda captures; otherwise arrays/refs plus tmain<int>()
59   static int svar;  // function-local static may appear in lastprivate
60   volatile double g;  // volatile scalar list item
61   volatile double &g1 = g;  // reference list item aliasing g
62
63   #ifdef LAMBDA
64   [&]() {  // outer lambda capturing g/g1 by reference
65     static float sfvar;
66
67     #pragma omp target teams distribute parallel for simd lastprivate(g, g1, svar, sfvar)  // this line number is encoded in the generated kernel symbol (..._main_l67)
68     for (int i = 0; i < 2; ++i) {
69       // skip gbl and bound tid
70       // loop variables
71
72
73
74       g1 = 1;
75       svar = 3;
76       sfvar = 4.0;
77
78
79
80       // skip tid and prev variables
81       // loop variables
82
83
84
85
86
87
88
89       [&]() {  // nested lambda: must see the privatized copies, not the originals
90         g = 2;
91         g1 = 2;
92         svar = 4;
93         sfvar = 8.0;
94
95       }();
96     }
97   }();
98   return 0;
99   #else
100   S<float> test;  // object with non-trivial special members
101   int t_var = 0;  // scalar lastprivate list item
102   int vec[] = {1, 2};  // array lastprivate list item
103   S<float> s_arr[] = {1, 2};  // array of class type: element-wise copy on lastprivate
104   S<float> &var = test;  // reference list item: lastprivate copies through the reference
105
106   #pragma omp target teams distribute parallel for simd lastprivate(t_var, vec, s_arr, s_arr, var, var, svar)  // duplicate list items are deliberate test input
107   for (int i = 0; i < 2; ++i) {
108     vec[i] = t_var;
109     s_arr[i] = var;
110   }
111   int i;  // unused; keeps a host-side 'i' alive after the loop
112
113   return tmain<int>();
114   #endif
115 }
116 
117 
118 // skip loop variables
119 
120 // copy from parameters to local address variables
121 
122 // prepare lastprivate targets
123 
124 // the distribute loop
125 
126 // lastprivates
127 
128 
129 
130 // gbl and bound tid vars, prev lb and ub vars
131 
132 // skip loop variables
133 
134 // copy from parameters to local address variables
135 
136 // prepare lastprivate targets
137 
138 // the distribute loop
139 // skip body: code generation routine is same as distribute parallel for lastprivate
140 
141 // lastprivates
142 
143 
144 // template tmain
145 
146 
147 // skip alloca of global_tid and bound_tid
148 // skip loop variables
149 
150 // copy from parameters to local address variables
151 
152 // prepare lastprivate targets
153 
154 
155 // lastprivates
156 
157 
158 // skip alloca of global_tid and bound_tid, and prev lb and ub vars
159 
160 // skip loop variables
161 
162 // copy from parameters to local address variables
163 
164 // prepare lastprivate targets
165 
166 // skip body: code generation routine is same as distribute parallel for lastprivate
167 
168 // lastprivates
169 
170 
171 #endif
172 // CHECK1-LABEL: define {{[^@]+}}@main
173 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
174 // CHECK1-NEXT:  entry:
175 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
176 // CHECK1-NEXT:    [[G:%.*]] = alloca double, align 8
177 // CHECK1-NEXT:    [[G1:%.*]] = alloca double*, align 8
178 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
179 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
180 // CHECK1-NEXT:    store double* [[G]], double** [[G1]], align 8
181 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
182 // CHECK1-NEXT:    store double* [[G]], double** [[TMP0]], align 8
183 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
184 // CHECK1-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 8
185 // CHECK1-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 8
186 // CHECK1-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(16) [[REF_TMP]])
187 // CHECK1-NEXT:    ret i32 0
188 //
189 //
190 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l67
191 // CHECK1-SAME: (i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2:[0-9]+]] {
192 // CHECK1-NEXT:  entry:
193 // CHECK1-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
194 // CHECK1-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
195 // CHECK1-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
196 // CHECK1-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
197 // CHECK1-NEXT:    [[TMP:%.*]] = alloca double*, align 8
198 // CHECK1-NEXT:    [[G1_CASTED:%.*]] = alloca i64, align 8
199 // CHECK1-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
200 // CHECK1-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i64, align 8
201 // CHECK1-NEXT:    [[G_CASTED:%.*]] = alloca i64, align 8
202 // CHECK1-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
203 // CHECK1-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
204 // CHECK1-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
205 // CHECK1-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
206 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
207 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
208 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
209 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
210 // CHECK1-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
211 // CHECK1-NEXT:    [[TMP0:%.*]] = load double*, double** [[TMP]], align 8
212 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile double, double* [[TMP0]], align 8
213 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[G1_CASTED]] to double*
214 // CHECK1-NEXT:    store double [[TMP1]], double* [[CONV4]], align 8
215 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[G1_CASTED]], align 8
216 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 8
217 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
218 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
219 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
220 // CHECK1-NEXT:    [[TMP5:%.*]] = load float, float* [[CONV2]], align 8
221 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[SFVAR_CASTED]] to float*
222 // CHECK1-NEXT:    store float [[TMP5]], float* [[CONV6]], align 4
223 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[SFVAR_CASTED]], align 8
224 // CHECK1-NEXT:    [[TMP7:%.*]] = load double, double* [[CONV3]], align 8
225 // CHECK1-NEXT:    [[CONV7:%.*]] = bitcast i64* [[G_CASTED]] to double*
226 // CHECK1-NEXT:    store double [[TMP7]], double* [[CONV7]], align 8
227 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[G_CASTED]], align 8
228 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]])
229 // CHECK1-NEXT:    ret void
230 //
231 //
232 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
233 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2]] {
234 // CHECK1-NEXT:  entry:
235 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
236 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
237 // CHECK1-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
238 // CHECK1-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
239 // CHECK1-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
240 // CHECK1-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
241 // CHECK1-NEXT:    [[TMP:%.*]] = alloca double*, align 8
242 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
243 // CHECK1-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
244 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
245 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
246 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
247 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
248 // CHECK1-NEXT:    [[G5:%.*]] = alloca double, align 8
249 // CHECK1-NEXT:    [[G16:%.*]] = alloca double, align 8
250 // CHECK1-NEXT:    [[_TMP7:%.*]] = alloca double*, align 8
251 // CHECK1-NEXT:    [[SVAR8:%.*]] = alloca i32, align 4
252 // CHECK1-NEXT:    [[SFVAR9:%.*]] = alloca float, align 4
253 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
254 // CHECK1-NEXT:    [[G1_CASTED:%.*]] = alloca i64, align 8
255 // CHECK1-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
256 // CHECK1-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i64, align 8
257 // CHECK1-NEXT:    [[G_CASTED:%.*]] = alloca i64, align 8
258 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
259 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
260 // CHECK1-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
261 // CHECK1-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
262 // CHECK1-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
263 // CHECK1-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
264 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
265 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
266 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
267 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
268 // CHECK1-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
269 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
270 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
271 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
272 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
273 // CHECK1-NEXT:    [[TMP0:%.*]] = load double*, double** [[TMP]], align 8
274 // CHECK1-NEXT:    store double* [[G16]], double** [[_TMP7]], align 8
275 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
276 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
277 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
278 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
279 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
280 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
281 // CHECK1:       cond.true:
282 // CHECK1-NEXT:    br label [[COND_END:%.*]]
283 // CHECK1:       cond.false:
284 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
285 // CHECK1-NEXT:    br label [[COND_END]]
286 // CHECK1:       cond.end:
287 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
288 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
289 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
290 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
291 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
292 // CHECK1:       omp.inner.for.cond:
293 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
294 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
295 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
296 // CHECK1-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
297 // CHECK1:       omp.inner.for.body:
298 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
299 // CHECK1-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
300 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
301 // CHECK1-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
302 // CHECK1-NEXT:    [[TMP12:%.*]] = load double*, double** [[_TMP7]], align 8
303 // CHECK1-NEXT:    [[TMP13:%.*]] = load volatile double, double* [[TMP12]], align 8
304 // CHECK1-NEXT:    [[CONV11:%.*]] = bitcast i64* [[G1_CASTED]] to double*
305 // CHECK1-NEXT:    store double [[TMP13]], double* [[CONV11]], align 8
306 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[G1_CASTED]], align 8
307 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[SVAR8]], align 4
308 // CHECK1-NEXT:    [[CONV12:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
309 // CHECK1-NEXT:    store i32 [[TMP15]], i32* [[CONV12]], align 4
310 // CHECK1-NEXT:    [[TMP16:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
311 // CHECK1-NEXT:    [[TMP17:%.*]] = load float, float* [[SFVAR9]], align 4
312 // CHECK1-NEXT:    [[CONV13:%.*]] = bitcast i64* [[SFVAR_CASTED]] to float*
313 // CHECK1-NEXT:    store float [[TMP17]], float* [[CONV13]], align 4
314 // CHECK1-NEXT:    [[TMP18:%.*]] = load i64, i64* [[SFVAR_CASTED]], align 8
315 // CHECK1-NEXT:    [[TMP19:%.*]] = load double, double* [[G5]], align 8
316 // CHECK1-NEXT:    [[CONV14:%.*]] = bitcast i64* [[G_CASTED]] to double*
317 // CHECK1-NEXT:    store double [[TMP19]], double* [[CONV14]], align 8
318 // CHECK1-NEXT:    [[TMP20:%.*]] = load i64, i64* [[G_CASTED]], align 8
319 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP14]], i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]])
320 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
321 // CHECK1:       omp.inner.for.inc:
322 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
323 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
324 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
325 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
326 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
327 // CHECK1:       omp.inner.for.end:
328 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
329 // CHECK1:       omp.loop.exit:
330 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
331 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
332 // CHECK1-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
333 // CHECK1-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
334 // CHECK1:       .omp.final.then:
335 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
336 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
337 // CHECK1:       .omp.final.done:
338 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
339 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
340 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
341 // CHECK1:       .omp.lastprivate.then:
342 // CHECK1-NEXT:    [[TMP27:%.*]] = load double, double* [[G5]], align 8
343 // CHECK1-NEXT:    store volatile double [[TMP27]], double* [[CONV3]], align 8
344 // CHECK1-NEXT:    [[TMP28:%.*]] = load double*, double** [[_TMP7]], align 8
345 // CHECK1-NEXT:    [[TMP29:%.*]] = load double, double* [[TMP28]], align 8
346 // CHECK1-NEXT:    store volatile double [[TMP29]], double* [[TMP0]], align 8
347 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[SVAR8]], align 4
348 // CHECK1-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
349 // CHECK1-NEXT:    [[TMP31:%.*]] = load float, float* [[SFVAR9]], align 4
350 // CHECK1-NEXT:    store float [[TMP31]], float* [[CONV2]], align 8
351 // CHECK1-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
352 // CHECK1:       .omp.lastprivate.done:
353 // CHECK1-NEXT:    ret void
354 //
355 //
356 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
357 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2]] {
358 // CHECK1-NEXT:  entry:
359 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
360 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
361 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
362 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
363 // CHECK1-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
364 // CHECK1-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
365 // CHECK1-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
366 // CHECK1-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
367 // CHECK1-NEXT:    [[TMP:%.*]] = alloca double*, align 8
368 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
369 // CHECK1-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
370 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
371 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
372 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
373 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
374 // CHECK1-NEXT:    [[G7:%.*]] = alloca double, align 8
375 // CHECK1-NEXT:    [[G18:%.*]] = alloca double, align 8
376 // CHECK1-NEXT:    [[_TMP9:%.*]] = alloca double*, align 8
377 // CHECK1-NEXT:    [[SVAR10:%.*]] = alloca i32, align 4
378 // CHECK1-NEXT:    [[SFVAR11:%.*]] = alloca float, align 4
379 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
380 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
381 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
382 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
383 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
384 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
385 // CHECK1-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
386 // CHECK1-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
387 // CHECK1-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
388 // CHECK1-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
389 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
390 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
391 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
392 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
393 // CHECK1-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
394 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
395 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
396 // CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
397 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP0]] to i32
398 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
399 // CHECK1-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP1]] to i32
400 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
401 // CHECK1-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
402 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
403 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
404 // CHECK1-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 8
405 // CHECK1-NEXT:    store double* [[G18]], double** [[_TMP9]], align 8
406 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
407 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
408 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
409 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
410 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
411 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
412 // CHECK1:       cond.true:
413 // CHECK1-NEXT:    br label [[COND_END:%.*]]
414 // CHECK1:       cond.false:
415 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
416 // CHECK1-NEXT:    br label [[COND_END]]
417 // CHECK1:       cond.end:
418 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
419 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
420 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
421 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
422 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
423 // CHECK1:       omp.inner.for.cond:
424 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
425 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
426 // CHECK1-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
427 // CHECK1-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
428 // CHECK1:       omp.inner.for.body:
429 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
430 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
431 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
432 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
433 // CHECK1-NEXT:    [[TMP11:%.*]] = load double*, double** [[_TMP9]], align 8
434 // CHECK1-NEXT:    store volatile double 1.000000e+00, double* [[TMP11]], align 8
435 // CHECK1-NEXT:    store i32 3, i32* [[SVAR10]], align 4
436 // CHECK1-NEXT:    store float 4.000000e+00, float* [[SFVAR11]], align 4
437 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
438 // CHECK1-NEXT:    store double* [[G7]], double** [[TMP12]], align 8
439 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
440 // CHECK1-NEXT:    [[TMP14:%.*]] = load double*, double** [[_TMP9]], align 8
441 // CHECK1-NEXT:    store double* [[TMP14]], double** [[TMP13]], align 8
442 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
443 // CHECK1-NEXT:    store i32* [[SVAR10]], i32** [[TMP15]], align 8
444 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
445 // CHECK1-NEXT:    store float* [[SFVAR11]], float** [[TMP16]], align 8
446 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]])
447 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
448 // CHECK1:       omp.body.continue:
449 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
450 // CHECK1:       omp.inner.for.inc:
451 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
452 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP17]], 1
453 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
454 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
455 // CHECK1:       omp.inner.for.end:
456 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
457 // CHECK1:       omp.loop.exit:
458 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
459 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
460 // CHECK1-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
461 // CHECK1-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
462 // CHECK1:       .omp.final.then:
463 // CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
464 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
465 // CHECK1:       .omp.final.done:
466 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
467 // CHECK1-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
468 // CHECK1-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
469 // CHECK1:       .omp.lastprivate.then:
470 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[G7]], align 8
471 // CHECK1-NEXT:    store volatile double [[TMP22]], double* [[CONV3]], align 8
472 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[_TMP9]], align 8
473 // CHECK1-NEXT:    [[TMP24:%.*]] = load double, double* [[TMP23]], align 8
474 // CHECK1-NEXT:    store volatile double [[TMP24]], double* [[TMP2]], align 8
475 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[SVAR10]], align 4
476 // CHECK1-NEXT:    store i32 [[TMP25]], i32* [[CONV1]], align 8
477 // CHECK1-NEXT:    [[TMP26:%.*]] = load float, float* [[SFVAR11]], align 4
478 // CHECK1-NEXT:    store float [[TMP26]], float* [[CONV2]], align 8
479 // CHECK1-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
480 // CHECK1:       .omp.lastprivate.done:
481 // CHECK1-NEXT:    ret void
482 //
483 //
484 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
485 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
486 // CHECK1-NEXT:  entry:
487 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
488 // CHECK1-NEXT:    ret void
489 //
490 //
491 // CHECK2-LABEL: define {{[^@]+}}@main
492 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
493 // CHECK2-NEXT:  entry:
494 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
495 // CHECK2-NEXT:    [[G:%.*]] = alloca double, align 8
496 // CHECK2-NEXT:    [[G1:%.*]] = alloca double*, align 8
497 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
498 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
499 // CHECK2-NEXT:    store double* [[G]], double** [[G1]], align 8
500 // CHECK2-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
501 // CHECK2-NEXT:    store double* [[G]], double** [[TMP0]], align 8
502 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
503 // CHECK2-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 8
504 // CHECK2-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 8
505 // CHECK2-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(16) [[REF_TMP]])
506 // CHECK2-NEXT:    ret i32 0
507 //
508 //
509 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l67
510 // CHECK2-SAME: (i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2:[0-9]+]] {
511 // CHECK2-NEXT:  entry:
512 // CHECK2-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
513 // CHECK2-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
514 // CHECK2-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
515 // CHECK2-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
516 // CHECK2-NEXT:    [[TMP:%.*]] = alloca double*, align 8
517 // CHECK2-NEXT:    [[G1_CASTED:%.*]] = alloca i64, align 8
518 // CHECK2-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
519 // CHECK2-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i64, align 8
520 // CHECK2-NEXT:    [[G_CASTED:%.*]] = alloca i64, align 8
521 // CHECK2-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
522 // CHECK2-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
523 // CHECK2-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
524 // CHECK2-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
525 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
526 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
527 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
528 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
529 // CHECK2-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
530 // CHECK2-NEXT:    [[TMP0:%.*]] = load double*, double** [[TMP]], align 8
531 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile double, double* [[TMP0]], align 8
532 // CHECK2-NEXT:    [[CONV4:%.*]] = bitcast i64* [[G1_CASTED]] to double*
533 // CHECK2-NEXT:    store double [[TMP1]], double* [[CONV4]], align 8
534 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[G1_CASTED]], align 8
535 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 8
536 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
537 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
538 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
539 // CHECK2-NEXT:    [[TMP5:%.*]] = load float, float* [[CONV2]], align 8
540 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[SFVAR_CASTED]] to float*
541 // CHECK2-NEXT:    store float [[TMP5]], float* [[CONV6]], align 4
542 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[SFVAR_CASTED]], align 8
543 // CHECK2-NEXT:    [[TMP7:%.*]] = load double, double* [[CONV3]], align 8
544 // CHECK2-NEXT:    [[CONV7:%.*]] = bitcast i64* [[G_CASTED]] to double*
545 // CHECK2-NEXT:    store double [[TMP7]], double* [[CONV7]], align 8
546 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[G_CASTED]], align 8
547 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]])
548 // CHECK2-NEXT:    ret void
549 //
550 //
551 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
552 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2]] {
553 // CHECK2-NEXT:  entry:
554 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
555 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
556 // CHECK2-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
557 // CHECK2-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
558 // CHECK2-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
559 // CHECK2-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
560 // CHECK2-NEXT:    [[TMP:%.*]] = alloca double*, align 8
561 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
562 // CHECK2-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
563 // CHECK2-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
564 // CHECK2-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
565 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
566 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
567 // CHECK2-NEXT:    [[G5:%.*]] = alloca double, align 8
568 // CHECK2-NEXT:    [[G16:%.*]] = alloca double, align 8
569 // CHECK2-NEXT:    [[_TMP7:%.*]] = alloca double*, align 8
570 // CHECK2-NEXT:    [[SVAR8:%.*]] = alloca i32, align 4
571 // CHECK2-NEXT:    [[SFVAR9:%.*]] = alloca float, align 4
572 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
573 // CHECK2-NEXT:    [[G1_CASTED:%.*]] = alloca i64, align 8
574 // CHECK2-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
575 // CHECK2-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i64, align 8
576 // CHECK2-NEXT:    [[G_CASTED:%.*]] = alloca i64, align 8
577 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
578 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
579 // CHECK2-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
580 // CHECK2-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
581 // CHECK2-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
582 // CHECK2-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
583 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
584 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
585 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
586 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
587 // CHECK2-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
588 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
589 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
590 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
591 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
592 // CHECK2-NEXT:    [[TMP0:%.*]] = load double*, double** [[TMP]], align 8
593 // CHECK2-NEXT:    store double* [[G16]], double** [[_TMP7]], align 8
594 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
595 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
596 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
597 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
598 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
599 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
600 // CHECK2:       cond.true:
601 // CHECK2-NEXT:    br label [[COND_END:%.*]]
602 // CHECK2:       cond.false:
603 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
604 // CHECK2-NEXT:    br label [[COND_END]]
605 // CHECK2:       cond.end:
606 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
607 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
608 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
609 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
610 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
611 // CHECK2:       omp.inner.for.cond:
612 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
613 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
614 // CHECK2-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
615 // CHECK2-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
616 // CHECK2:       omp.inner.for.body:
617 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
618 // CHECK2-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
619 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
620 // CHECK2-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
621 // CHECK2-NEXT:    [[TMP12:%.*]] = load double*, double** [[_TMP7]], align 8
622 // CHECK2-NEXT:    [[TMP13:%.*]] = load volatile double, double* [[TMP12]], align 8
623 // CHECK2-NEXT:    [[CONV11:%.*]] = bitcast i64* [[G1_CASTED]] to double*
624 // CHECK2-NEXT:    store double [[TMP13]], double* [[CONV11]], align 8
625 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[G1_CASTED]], align 8
626 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[SVAR8]], align 4
627 // CHECK2-NEXT:    [[CONV12:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
628 // CHECK2-NEXT:    store i32 [[TMP15]], i32* [[CONV12]], align 4
629 // CHECK2-NEXT:    [[TMP16:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
630 // CHECK2-NEXT:    [[TMP17:%.*]] = load float, float* [[SFVAR9]], align 4
631 // CHECK2-NEXT:    [[CONV13:%.*]] = bitcast i64* [[SFVAR_CASTED]] to float*
632 // CHECK2-NEXT:    store float [[TMP17]], float* [[CONV13]], align 4
633 // CHECK2-NEXT:    [[TMP18:%.*]] = load i64, i64* [[SFVAR_CASTED]], align 8
634 // CHECK2-NEXT:    [[TMP19:%.*]] = load double, double* [[G5]], align 8
635 // CHECK2-NEXT:    [[CONV14:%.*]] = bitcast i64* [[G_CASTED]] to double*
636 // CHECK2-NEXT:    store double [[TMP19]], double* [[CONV14]], align 8
637 // CHECK2-NEXT:    [[TMP20:%.*]] = load i64, i64* [[G_CASTED]], align 8
638 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP14]], i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]])
639 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
640 // CHECK2:       omp.inner.for.inc:
641 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
642 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
643 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
644 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
645 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
646 // CHECK2:       omp.inner.for.end:
647 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
648 // CHECK2:       omp.loop.exit:
649 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
650 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
651 // CHECK2-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
652 // CHECK2-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
653 // CHECK2:       .omp.final.then:
654 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
655 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
656 // CHECK2:       .omp.final.done:
657 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
658 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
659 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
660 // CHECK2:       .omp.lastprivate.then:
661 // CHECK2-NEXT:    [[TMP27:%.*]] = load double, double* [[G5]], align 8
662 // CHECK2-NEXT:    store volatile double [[TMP27]], double* [[CONV3]], align 8
663 // CHECK2-NEXT:    [[TMP28:%.*]] = load double*, double** [[_TMP7]], align 8
664 // CHECK2-NEXT:    [[TMP29:%.*]] = load double, double* [[TMP28]], align 8
665 // CHECK2-NEXT:    store volatile double [[TMP29]], double* [[TMP0]], align 8
666 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[SVAR8]], align 4
667 // CHECK2-NEXT:    store i32 [[TMP30]], i32* [[CONV1]], align 8
668 // CHECK2-NEXT:    [[TMP31:%.*]] = load float, float* [[SFVAR9]], align 4
669 // CHECK2-NEXT:    store float [[TMP31]], float* [[CONV2]], align 8
670 // CHECK2-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
671 // CHECK2:       .omp.lastprivate.done:
672 // CHECK2-NEXT:    ret void
673 //
674 //
675 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
676 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[G1:%.*]], i64 [[SVAR:%.*]], i64 [[SFVAR:%.*]], i64 [[G:%.*]]) #[[ATTR2]] {
677 // CHECK2-NEXT:  entry:
678 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
679 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
680 // CHECK2-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
681 // CHECK2-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
682 // CHECK2-NEXT:    [[G1_ADDR:%.*]] = alloca i64, align 8
683 // CHECK2-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
684 // CHECK2-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i64, align 8
685 // CHECK2-NEXT:    [[G_ADDR:%.*]] = alloca i64, align 8
686 // CHECK2-NEXT:    [[TMP:%.*]] = alloca double*, align 8
687 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
688 // CHECK2-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
689 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
690 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
691 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
692 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
693 // CHECK2-NEXT:    [[G7:%.*]] = alloca double, align 8
694 // CHECK2-NEXT:    [[G18:%.*]] = alloca double, align 8
695 // CHECK2-NEXT:    [[_TMP9:%.*]] = alloca double*, align 8
696 // CHECK2-NEXT:    [[SVAR10:%.*]] = alloca i32, align 4
697 // CHECK2-NEXT:    [[SFVAR11:%.*]] = alloca float, align 4
698 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
699 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
700 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
701 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
702 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
703 // CHECK2-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
704 // CHECK2-NEXT:    store i64 [[G1]], i64* [[G1_ADDR]], align 8
705 // CHECK2-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
706 // CHECK2-NEXT:    store i64 [[SFVAR]], i64* [[SFVAR_ADDR]], align 8
707 // CHECK2-NEXT:    store i64 [[G]], i64* [[G_ADDR]], align 8
708 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to double*
709 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
710 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[SFVAR_ADDR]] to float*
711 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[G_ADDR]] to double*
712 // CHECK2-NEXT:    store double* [[CONV]], double** [[TMP]], align 8
713 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
714 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
715 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
716 // CHECK2-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP0]] to i32
717 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
718 // CHECK2-NEXT:    [[CONV6:%.*]] = trunc i64 [[TMP1]] to i32
719 // CHECK2-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
720 // CHECK2-NEXT:    store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
721 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
722 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
723 // CHECK2-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 8
724 // CHECK2-NEXT:    store double* [[G18]], double** [[_TMP9]], align 8
725 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
726 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
727 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
728 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
729 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
730 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
731 // CHECK2:       cond.true:
732 // CHECK2-NEXT:    br label [[COND_END:%.*]]
733 // CHECK2:       cond.false:
734 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
735 // CHECK2-NEXT:    br label [[COND_END]]
736 // CHECK2:       cond.end:
737 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
738 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
739 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
740 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
741 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
742 // CHECK2:       omp.inner.for.cond:
743 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
744 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
745 // CHECK2-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
746 // CHECK2-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
747 // CHECK2:       omp.inner.for.body:
748 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
749 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
750 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
751 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
752 // CHECK2-NEXT:    [[TMP11:%.*]] = load double*, double** [[_TMP9]], align 8
753 // CHECK2-NEXT:    store volatile double 1.000000e+00, double* [[TMP11]], align 8
754 // CHECK2-NEXT:    store i32 3, i32* [[SVAR10]], align 4
755 // CHECK2-NEXT:    store float 4.000000e+00, float* [[SFVAR11]], align 4
756 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
757 // CHECK2-NEXT:    store double* [[G7]], double** [[TMP12]], align 8
758 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
759 // CHECK2-NEXT:    [[TMP14:%.*]] = load double*, double** [[_TMP9]], align 8
760 // CHECK2-NEXT:    store double* [[TMP14]], double** [[TMP13]], align 8
761 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
762 // CHECK2-NEXT:    store i32* [[SVAR10]], i32** [[TMP15]], align 8
763 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
764 // CHECK2-NEXT:    store float* [[SFVAR11]], float** [[TMP16]], align 8
765 // CHECK2-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]])
766 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
767 // CHECK2:       omp.body.continue:
768 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
769 // CHECK2:       omp.inner.for.inc:
770 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
771 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP17]], 1
772 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
773 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
774 // CHECK2:       omp.inner.for.end:
775 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
776 // CHECK2:       omp.loop.exit:
777 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
778 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
779 // CHECK2-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
780 // CHECK2-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
781 // CHECK2:       .omp.final.then:
782 // CHECK2-NEXT:    store i32 2, i32* [[I]], align 4
783 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
784 // CHECK2:       .omp.final.done:
785 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
786 // CHECK2-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
787 // CHECK2-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
788 // CHECK2:       .omp.lastprivate.then:
789 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[G7]], align 8
790 // CHECK2-NEXT:    store volatile double [[TMP22]], double* [[CONV3]], align 8
791 // CHECK2-NEXT:    [[TMP23:%.*]] = load double*, double** [[_TMP9]], align 8
792 // CHECK2-NEXT:    [[TMP24:%.*]] = load double, double* [[TMP23]], align 8
793 // CHECK2-NEXT:    store volatile double [[TMP24]], double* [[TMP2]], align 8
794 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[SVAR10]], align 4
795 // CHECK2-NEXT:    store i32 [[TMP25]], i32* [[CONV1]], align 8
796 // CHECK2-NEXT:    [[TMP26:%.*]] = load float, float* [[SFVAR11]], align 4
797 // CHECK2-NEXT:    store float [[TMP26]], float* [[CONV2]], align 8
798 // CHECK2-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
799 // CHECK2:       .omp.lastprivate.done:
800 // CHECK2-NEXT:    ret void
801 //
802 //
803 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
804 // CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
805 // CHECK2-NEXT:  entry:
806 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
807 // CHECK2-NEXT:    ret void
808 //
809 //
810 // CHECK3-LABEL: define {{[^@]+}}@main
811 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
812 // CHECK3-NEXT:  entry:
813 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
814 // CHECK3-NEXT:    [[G:%.*]] = alloca double, align 8
815 // CHECK3-NEXT:    [[G1:%.*]] = alloca double*, align 4
816 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
817 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
818 // CHECK3-NEXT:    store double* [[G]], double** [[G1]], align 4
819 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
820 // CHECK3-NEXT:    store double* [[G]], double** [[TMP0]], align 4
821 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
822 // CHECK3-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 4
823 // CHECK3-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 4
824 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(8) [[REF_TMP]])
825 // CHECK3-NEXT:    ret i32 0
826 //
827 //
828 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l67
829 // CHECK3-SAME: (double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2:[0-9]+]] {
830 // CHECK3-NEXT:  entry:
831 // CHECK3-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
832 // CHECK3-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
833 // CHECK3-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
834 // CHECK3-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
835 // CHECK3-NEXT:    [[TMP:%.*]] = alloca double*, align 4
836 // CHECK3-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
837 // CHECK3-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i32, align 4
838 // CHECK3-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
839 // CHECK3-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
840 // CHECK3-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
841 // CHECK3-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
842 // CHECK3-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
843 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
844 // CHECK3-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
845 // CHECK3-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
846 // CHECK3-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 4
847 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SVAR_ADDR]], align 4
848 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[SVAR_CASTED]], align 4
849 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
850 // CHECK3-NEXT:    [[TMP5:%.*]] = load float, float* [[CONV]], align 4
851 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[SFVAR_CASTED]] to float*
852 // CHECK3-NEXT:    store float [[TMP5]], float* [[CONV1]], align 4
853 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SFVAR_CASTED]], align 4
854 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, double*, i32, i32, double*)* @.omp_outlined. to void (i32*, i32*, ...)*), double* [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], double* [[TMP1]])
855 // CHECK3-NEXT:    ret void
856 //
857 //
858 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
859 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2]] {
860 // CHECK3-NEXT:  entry:
861 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
862 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
863 // CHECK3-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
864 // CHECK3-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
865 // CHECK3-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
866 // CHECK3-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
867 // CHECK3-NEXT:    [[TMP:%.*]] = alloca double*, align 4
868 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
869 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
870 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
871 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
872 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
873 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
874 // CHECK3-NEXT:    [[G2:%.*]] = alloca double, align 8
875 // CHECK3-NEXT:    [[G13:%.*]] = alloca double, align 8
876 // CHECK3-NEXT:    [[_TMP4:%.*]] = alloca double*, align 4
877 // CHECK3-NEXT:    [[SVAR5:%.*]] = alloca i32, align 4
878 // CHECK3-NEXT:    [[SFVAR6:%.*]] = alloca float, align 4
879 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
880 // CHECK3-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
881 // CHECK3-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i32, align 4
882 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
883 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
884 // CHECK3-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
885 // CHECK3-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
886 // CHECK3-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
887 // CHECK3-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
888 // CHECK3-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
889 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
890 // CHECK3-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
891 // CHECK3-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
892 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
893 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
894 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
895 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
896 // CHECK3-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 4
897 // CHECK3-NEXT:    store double* [[G13]], double** [[_TMP4]], align 4
898 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
899 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
900 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
901 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
902 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
903 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
904 // CHECK3:       cond.true:
905 // CHECK3-NEXT:    br label [[COND_END:%.*]]
906 // CHECK3:       cond.false:
907 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
908 // CHECK3-NEXT:    br label [[COND_END]]
909 // CHECK3:       cond.end:
910 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
911 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
912 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
913 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
914 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
915 // CHECK3:       omp.inner.for.cond:
916 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
917 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
918 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
919 // CHECK3-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
920 // CHECK3:       omp.inner.for.body:
921 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
922 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
923 // CHECK3-NEXT:    [[TMP12:%.*]] = load double*, double** [[_TMP4]], align 4
924 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[SVAR5]], align 4
925 // CHECK3-NEXT:    store i32 [[TMP13]], i32* [[SVAR_CASTED]], align 4
926 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
927 // CHECK3-NEXT:    [[TMP15:%.*]] = load float, float* [[SFVAR6]], align 4
928 // CHECK3-NEXT:    [[CONV8:%.*]] = bitcast i32* [[SFVAR_CASTED]] to float*
929 // CHECK3-NEXT:    store float [[TMP15]], float* [[CONV8]], align 4
930 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[SFVAR_CASTED]], align 4
931 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, double*, i32, i32, double*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP10]], i32 [[TMP11]], double* [[TMP12]], i32 [[TMP14]], i32 [[TMP16]], double* [[G2]])
932 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
933 // CHECK3:       omp.inner.for.inc:
934 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
935 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
936 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
937 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
938 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
939 // CHECK3:       omp.inner.for.end:
940 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
941 // CHECK3:       omp.loop.exit:
942 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
943 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
944 // CHECK3-NEXT:    [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
945 // CHECK3-NEXT:    br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
946 // CHECK3:       .omp.final.then:
947 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
948 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
949 // CHECK3:       .omp.final.done:
950 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
951 // CHECK3-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
952 // CHECK3-NEXT:    br i1 [[TMP22]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
953 // CHECK3:       .omp.lastprivate.then:
954 // CHECK3-NEXT:    [[TMP23:%.*]] = load double, double* [[G2]], align 8
955 // CHECK3-NEXT:    store volatile double [[TMP23]], double* [[TMP1]], align 8
956 // CHECK3-NEXT:    [[TMP24:%.*]] = load double*, double** [[_TMP4]], align 4
957 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[TMP24]], align 4
958 // CHECK3-NEXT:    store volatile double [[TMP25]], double* [[TMP2]], align 4
959 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[SVAR5]], align 4
960 // CHECK3-NEXT:    store i32 [[TMP26]], i32* [[SVAR_ADDR]], align 4
961 // CHECK3-NEXT:    [[TMP27:%.*]] = load float, float* [[SFVAR6]], align 4
962 // CHECK3-NEXT:    store float [[TMP27]], float* [[CONV]], align 4
963 // CHECK3-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
964 // CHECK3:       .omp.lastprivate.done:
965 // CHECK3-NEXT:    ret void
966 //
967 //
968 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
969 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2]] {
970 // CHECK3-NEXT:  entry:
971 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
972 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
973 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
974 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
975 // CHECK3-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
976 // CHECK3-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
977 // CHECK3-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
978 // CHECK3-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
979 // CHECK3-NEXT:    [[TMP:%.*]] = alloca double*, align 4
980 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
981 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
982 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
983 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
984 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
985 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
986 // CHECK3-NEXT:    [[G2:%.*]] = alloca double, align 8
987 // CHECK3-NEXT:    [[G13:%.*]] = alloca double, align 8
988 // CHECK3-NEXT:    [[_TMP4:%.*]] = alloca double*, align 4
989 // CHECK3-NEXT:    [[SVAR5:%.*]] = alloca i32, align 4
990 // CHECK3-NEXT:    [[SFVAR6:%.*]] = alloca float, align 4
991 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
992 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
993 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
994 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
995 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
996 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
997 // CHECK3-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
998 // CHECK3-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
999 // CHECK3-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
1000 // CHECK3-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
1001 // CHECK3-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
1002 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
1003 // CHECK3-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
1004 // CHECK3-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
1005 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1006 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1007 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1008 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1009 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_LB]], align 4
1010 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
1011 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1012 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1013 // CHECK3-NEXT:    [[TMP4:%.*]] = load double*, double** [[TMP]], align 4
1014 // CHECK3-NEXT:    store double* [[G13]], double** [[_TMP4]], align 4
1015 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1016 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1017 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1018 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1019 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
1020 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1021 // CHECK3:       cond.true:
1022 // CHECK3-NEXT:    br label [[COND_END:%.*]]
1023 // CHECK3:       cond.false:
1024 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1025 // CHECK3-NEXT:    br label [[COND_END]]
1026 // CHECK3:       cond.end:
1027 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
1028 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1029 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1030 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
1031 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1032 // CHECK3:       omp.inner.for.cond:
1033 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1034 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1035 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
1036 // CHECK3-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1037 // CHECK3:       omp.inner.for.body:
1038 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1039 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
1040 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1041 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1042 // CHECK3-NEXT:    [[TMP13:%.*]] = load double*, double** [[_TMP4]], align 4
1043 // CHECK3-NEXT:    store volatile double 1.000000e+00, double* [[TMP13]], align 4
1044 // CHECK3-NEXT:    store i32 3, i32* [[SVAR5]], align 4
1045 // CHECK3-NEXT:    store float 4.000000e+00, float* [[SFVAR6]], align 4
1046 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1047 // CHECK3-NEXT:    store double* [[G2]], double** [[TMP14]], align 4
1048 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1049 // CHECK3-NEXT:    [[TMP16:%.*]] = load double*, double** [[_TMP4]], align 4
1050 // CHECK3-NEXT:    store double* [[TMP16]], double** [[TMP15]], align 4
1051 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1052 // CHECK3-NEXT:    store i32* [[SVAR5]], i32** [[TMP17]], align 4
1053 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1054 // CHECK3-NEXT:    store float* [[SFVAR6]], float** [[TMP18]], align 4
1055 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]])
1056 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1057 // CHECK3:       omp.body.continue:
1058 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1059 // CHECK3:       omp.inner.for.inc:
1060 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1061 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
1062 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
1063 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
1064 // CHECK3:       omp.inner.for.end:
1065 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1066 // CHECK3:       omp.loop.exit:
1067 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1068 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1069 // CHECK3-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
1070 // CHECK3-NEXT:    br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1071 // CHECK3:       .omp.final.then:
1072 // CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
1073 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1074 // CHECK3:       .omp.final.done:
1075 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1076 // CHECK3-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
1077 // CHECK3-NEXT:    br i1 [[TMP23]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1078 // CHECK3:       .omp.lastprivate.then:
1079 // CHECK3-NEXT:    [[TMP24:%.*]] = load double, double* [[G2]], align 8
1080 // CHECK3-NEXT:    store volatile double [[TMP24]], double* [[TMP1]], align 8
1081 // CHECK3-NEXT:    [[TMP25:%.*]] = load double*, double** [[_TMP4]], align 4
1082 // CHECK3-NEXT:    [[TMP26:%.*]] = load double, double* [[TMP25]], align 4
1083 // CHECK3-NEXT:    store volatile double [[TMP26]], double* [[TMP4]], align 4
1084 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR5]], align 4
1085 // CHECK3-NEXT:    store i32 [[TMP27]], i32* [[SVAR_ADDR]], align 4
1086 // CHECK3-NEXT:    [[TMP28:%.*]] = load float, float* [[SFVAR6]], align 4
1087 // CHECK3-NEXT:    store float [[TMP28]], float* [[CONV]], align 4
1088 // CHECK3-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1089 // CHECK3:       .omp.lastprivate.done:
1090 // CHECK3-NEXT:    ret void
1091 //
1092 //
1093 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1094 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
1095 // CHECK3-NEXT:  entry:
1096 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
1097 // CHECK3-NEXT:    ret void
1098 //
1099 //
1100 // CHECK4-LABEL: define {{[^@]+}}@main
1101 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
1102 // CHECK4-NEXT:  entry:
1103 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1104 // CHECK4-NEXT:    [[G:%.*]] = alloca double, align 8
1105 // CHECK4-NEXT:    [[G1:%.*]] = alloca double*, align 4
1106 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
1107 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1108 // CHECK4-NEXT:    store double* [[G]], double** [[G1]], align 4
1109 // CHECK4-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
1110 // CHECK4-NEXT:    store double* [[G]], double** [[TMP0]], align 4
1111 // CHECK4-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
1112 // CHECK4-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 4
1113 // CHECK4-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 4
1114 // CHECK4-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(8) [[REF_TMP]])
1115 // CHECK4-NEXT:    ret i32 0
1116 //
1117 //
1118 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l67
1119 // CHECK4-SAME: (double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2:[0-9]+]] {
1120 // CHECK4-NEXT:  entry:
1121 // CHECK4-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
1122 // CHECK4-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
1123 // CHECK4-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
1124 // CHECK4-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
1125 // CHECK4-NEXT:    [[TMP:%.*]] = alloca double*, align 4
1126 // CHECK4-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
1127 // CHECK4-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i32, align 4
1128 // CHECK4-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
1129 // CHECK4-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
1130 // CHECK4-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
1131 // CHECK4-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
1132 // CHECK4-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
1133 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
1134 // CHECK4-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
1135 // CHECK4-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
1136 // CHECK4-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 4
1137 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SVAR_ADDR]], align 4
1138 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[SVAR_CASTED]], align 4
1139 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
1140 // CHECK4-NEXT:    [[TMP5:%.*]] = load float, float* [[CONV]], align 4
1141 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[SFVAR_CASTED]] to float*
1142 // CHECK4-NEXT:    store float [[TMP5]], float* [[CONV1]], align 4
1143 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SFVAR_CASTED]], align 4
1144 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, double*, i32, i32, double*)* @.omp_outlined. to void (i32*, i32*, ...)*), double* [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], double* [[TMP1]])
1145 // CHECK4-NEXT:    ret void
1146 //
1147 //
1148 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
1149 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2]] {
1150 // CHECK4-NEXT:  entry:
1151 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1152 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1153 // CHECK4-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
1154 // CHECK4-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
1155 // CHECK4-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
1156 // CHECK4-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
1157 // CHECK4-NEXT:    [[TMP:%.*]] = alloca double*, align 4
1158 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1159 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1160 // CHECK4-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1161 // CHECK4-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1162 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1163 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1164 // CHECK4-NEXT:    [[G2:%.*]] = alloca double, align 8
1165 // CHECK4-NEXT:    [[G13:%.*]] = alloca double, align 8
1166 // CHECK4-NEXT:    [[_TMP4:%.*]] = alloca double*, align 4
1167 // CHECK4-NEXT:    [[SVAR5:%.*]] = alloca i32, align 4
1168 // CHECK4-NEXT:    [[SFVAR6:%.*]] = alloca float, align 4
1169 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
1170 // CHECK4-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
1171 // CHECK4-NEXT:    [[SFVAR_CASTED:%.*]] = alloca i32, align 4
1172 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1173 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1174 // CHECK4-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
1175 // CHECK4-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
1176 // CHECK4-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
1177 // CHECK4-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
1178 // CHECK4-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
1179 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
1180 // CHECK4-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
1181 // CHECK4-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
1182 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1183 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1184 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1185 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1186 // CHECK4-NEXT:    [[TMP2:%.*]] = load double*, double** [[TMP]], align 4
1187 // CHECK4-NEXT:    store double* [[G13]], double** [[_TMP4]], align 4
1188 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1189 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
1190 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1191 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1192 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
1193 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1194 // CHECK4:       cond.true:
1195 // CHECK4-NEXT:    br label [[COND_END:%.*]]
1196 // CHECK4:       cond.false:
1197 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1198 // CHECK4-NEXT:    br label [[COND_END]]
1199 // CHECK4:       cond.end:
1200 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1201 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1202 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1203 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
1204 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1205 // CHECK4:       omp.inner.for.cond:
1206 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1207 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1208 // CHECK4-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1209 // CHECK4-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1210 // CHECK4:       omp.inner.for.body:
1211 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1212 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1213 // CHECK4-NEXT:    [[TMP12:%.*]] = load double*, double** [[_TMP4]], align 4
1214 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[SVAR5]], align 4
1215 // CHECK4-NEXT:    store i32 [[TMP13]], i32* [[SVAR_CASTED]], align 4
1216 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
1217 // CHECK4-NEXT:    [[TMP15:%.*]] = load float, float* [[SFVAR6]], align 4
1218 // CHECK4-NEXT:    [[CONV8:%.*]] = bitcast i32* [[SFVAR_CASTED]] to float*
1219 // CHECK4-NEXT:    store float [[TMP15]], float* [[CONV8]], align 4
1220 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[SFVAR_CASTED]], align 4
1221 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, double*, i32, i32, double*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP10]], i32 [[TMP11]], double* [[TMP12]], i32 [[TMP14]], i32 [[TMP16]], double* [[G2]])
1222 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1223 // CHECK4:       omp.inner.for.inc:
1224 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1225 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1226 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
1227 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1228 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
1229 // CHECK4:       omp.inner.for.end:
1230 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1231 // CHECK4:       omp.loop.exit:
1232 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
1233 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1234 // CHECK4-NEXT:    [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
1235 // CHECK4-NEXT:    br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1236 // CHECK4:       .omp.final.then:
1237 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
1238 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1239 // CHECK4:       .omp.final.done:
1240 // CHECK4-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1241 // CHECK4-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
1242 // CHECK4-NEXT:    br i1 [[TMP22]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1243 // CHECK4:       .omp.lastprivate.then:
1244 // CHECK4-NEXT:    [[TMP23:%.*]] = load double, double* [[G2]], align 8
1245 // CHECK4-NEXT:    store volatile double [[TMP23]], double* [[TMP1]], align 8
1246 // CHECK4-NEXT:    [[TMP24:%.*]] = load double*, double** [[_TMP4]], align 4
1247 // CHECK4-NEXT:    [[TMP25:%.*]] = load double, double* [[TMP24]], align 4
1248 // CHECK4-NEXT:    store volatile double [[TMP25]], double* [[TMP2]], align 4
1249 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[SVAR5]], align 4
1250 // CHECK4-NEXT:    store i32 [[TMP26]], i32* [[SVAR_ADDR]], align 4
1251 // CHECK4-NEXT:    [[TMP27:%.*]] = load float, float* [[SFVAR6]], align 4
1252 // CHECK4-NEXT:    store float [[TMP27]], float* [[CONV]], align 4
1253 // CHECK4-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1254 // CHECK4:       .omp.lastprivate.done:
1255 // CHECK4-NEXT:    ret void
1256 //
1257 //
1258 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
1259 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], double* nonnull align 4 dereferenceable(8) [[G1:%.*]], i32 [[SVAR:%.*]], i32 [[SFVAR:%.*]], double* nonnull align 4 dereferenceable(8) [[G:%.*]]) #[[ATTR2]] {
1260 // CHECK4-NEXT:  entry:
1261 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1262 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1263 // CHECK4-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
1264 // CHECK4-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
1265 // CHECK4-NEXT:    [[G1_ADDR:%.*]] = alloca double*, align 4
1266 // CHECK4-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
1267 // CHECK4-NEXT:    [[SFVAR_ADDR:%.*]] = alloca i32, align 4
1268 // CHECK4-NEXT:    [[G_ADDR:%.*]] = alloca double*, align 4
1269 // CHECK4-NEXT:    [[TMP:%.*]] = alloca double*, align 4
1270 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1271 // CHECK4-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1272 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1273 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1274 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1275 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1276 // CHECK4-NEXT:    [[G2:%.*]] = alloca double, align 8
1277 // CHECK4-NEXT:    [[G13:%.*]] = alloca double, align 8
1278 // CHECK4-NEXT:    [[_TMP4:%.*]] = alloca double*, align 4
1279 // CHECK4-NEXT:    [[SVAR5:%.*]] = alloca i32, align 4
1280 // CHECK4-NEXT:    [[SFVAR6:%.*]] = alloca float, align 4
1281 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
1282 // CHECK4-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
1283 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1284 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1285 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1286 // CHECK4-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1287 // CHECK4-NEXT:    store double* [[G1]], double** [[G1_ADDR]], align 4
1288 // CHECK4-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
1289 // CHECK4-NEXT:    store i32 [[SFVAR]], i32* [[SFVAR_ADDR]], align 4
1290 // CHECK4-NEXT:    store double* [[G]], double** [[G_ADDR]], align 4
1291 // CHECK4-NEXT:    [[TMP0:%.*]] = load double*, double** [[G1_ADDR]], align 4
1292 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[SFVAR_ADDR]] to float*
1293 // CHECK4-NEXT:    [[TMP1:%.*]] = load double*, double** [[G_ADDR]], align 4
1294 // CHECK4-NEXT:    store double* [[TMP0]], double** [[TMP]], align 4
1295 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1296 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1297 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
1298 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
1299 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_LB]], align 4
1300 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
1301 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1302 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1303 // CHECK4-NEXT:    [[TMP4:%.*]] = load double*, double** [[TMP]], align 4
1304 // CHECK4-NEXT:    store double* [[G13]], double** [[_TMP4]], align 4
1305 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1306 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1307 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1308 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1309 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
1310 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1311 // CHECK4:       cond.true:
1312 // CHECK4-NEXT:    br label [[COND_END:%.*]]
1313 // CHECK4:       cond.false:
1314 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1315 // CHECK4-NEXT:    br label [[COND_END]]
1316 // CHECK4:       cond.end:
1317 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
1318 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1319 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1320 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
1321 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1322 // CHECK4:       omp.inner.for.cond:
1323 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1324 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1325 // CHECK4-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
1326 // CHECK4-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1327 // CHECK4:       omp.inner.for.body:
1328 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1329 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
1330 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1331 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1332 // CHECK4-NEXT:    [[TMP13:%.*]] = load double*, double** [[_TMP4]], align 4
1333 // CHECK4-NEXT:    store volatile double 1.000000e+00, double* [[TMP13]], align 4
1334 // CHECK4-NEXT:    store i32 3, i32* [[SVAR5]], align 4
1335 // CHECK4-NEXT:    store float 4.000000e+00, float* [[SFVAR6]], align 4
1336 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1337 // CHECK4-NEXT:    store double* [[G2]], double** [[TMP14]], align 4
1338 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1339 // CHECK4-NEXT:    [[TMP16:%.*]] = load double*, double** [[_TMP4]], align 4
1340 // CHECK4-NEXT:    store double* [[TMP16]], double** [[TMP15]], align 4
1341 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1342 // CHECK4-NEXT:    store i32* [[SVAR5]], i32** [[TMP17]], align 4
1343 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1344 // CHECK4-NEXT:    store float* [[SFVAR6]], float** [[TMP18]], align 4
1345 // CHECK4-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 4 dereferenceable(16) [[REF_TMP]])
1346 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1347 // CHECK4:       omp.body.continue:
1348 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1349 // CHECK4:       omp.inner.for.inc:
1350 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1351 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
1352 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
1353 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
1354 // CHECK4:       omp.inner.for.end:
1355 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1356 // CHECK4:       omp.loop.exit:
1357 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1358 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1359 // CHECK4-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
1360 // CHECK4-NEXT:    br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1361 // CHECK4:       .omp.final.then:
1362 // CHECK4-NEXT:    store i32 2, i32* [[I]], align 4
1363 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1364 // CHECK4:       .omp.final.done:
1365 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1366 // CHECK4-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
1367 // CHECK4-NEXT:    br i1 [[TMP23]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1368 // CHECK4:       .omp.lastprivate.then:
1369 // CHECK4-NEXT:    [[TMP24:%.*]] = load double, double* [[G2]], align 8
1370 // CHECK4-NEXT:    store volatile double [[TMP24]], double* [[TMP1]], align 8
1371 // CHECK4-NEXT:    [[TMP25:%.*]] = load double*, double** [[_TMP4]], align 4
1372 // CHECK4-NEXT:    [[TMP26:%.*]] = load double, double* [[TMP25]], align 4
1373 // CHECK4-NEXT:    store volatile double [[TMP26]], double* [[TMP4]], align 4
1374 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR5]], align 4
1375 // CHECK4-NEXT:    store i32 [[TMP27]], i32* [[SVAR_ADDR]], align 4
1376 // CHECK4-NEXT:    [[TMP28:%.*]] = load float, float* [[SFVAR6]], align 4
1377 // CHECK4-NEXT:    store float [[TMP28]], float* [[CONV]], align 4
1378 // CHECK4-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1379 // CHECK4:       .omp.lastprivate.done:
1380 // CHECK4-NEXT:    ret void
1381 //
1382 //
1383 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1384 // CHECK4-SAME: () #[[ATTR4:[0-9]+]] {
1385 // CHECK4-NEXT:  entry:
1386 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
1387 // CHECK4-NEXT:    ret void
1388 //
1389 //
1390 // CHECK5-LABEL: define {{[^@]+}}@main
1391 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
1392 // CHECK5-NEXT:  entry:
1393 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1394 // CHECK5-NEXT:    [[G:%.*]] = alloca double, align 8
1395 // CHECK5-NEXT:    [[G1:%.*]] = alloca double*, align 8
1396 // CHECK5-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1397 // CHECK5-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1398 // CHECK5-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1399 // CHECK5-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
1400 // CHECK5-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 8
1401 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
1402 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
1403 // CHECK5-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
1404 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
1405 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
1406 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
1407 // CHECK5-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
1408 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1409 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1410 // CHECK5-NEXT:    store double* [[G]], double** [[G1]], align 8
1411 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
1412 // CHECK5-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1413 // CHECK5-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1414 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1415 // CHECK5-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1416 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
1417 // CHECK5-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1418 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
1419 // CHECK5-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 8
1420 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
1421 // CHECK5-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 8
1422 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
1423 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
1424 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
1425 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
1426 // CHECK5-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
1427 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* @_ZZ4mainE4svar, align 4
1428 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
1429 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[CONV1]], align 4
1430 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
1431 // CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1432 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
1433 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 8
1434 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1435 // CHECK5-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [2 x i32]**
1436 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP10]], align 8
1437 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1438 // CHECK5-NEXT:    store i8* null, i8** [[TMP11]], align 8
1439 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1440 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1441 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
1442 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1443 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
1444 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
1445 // CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1446 // CHECK5-NEXT:    store i8* null, i8** [[TMP16]], align 8
1447 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1448 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S]**
1449 // CHECK5-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP18]], align 8
1450 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1451 // CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [2 x %struct.S]**
1452 // CHECK5-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP20]], align 8
1453 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1454 // CHECK5-NEXT:    store i8* null, i8** [[TMP21]], align 8
1455 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1456 // CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S**
1457 // CHECK5-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP23]], align 8
1458 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1459 // CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.S**
1460 // CHECK5-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP25]], align 8
1461 // CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1462 // CHECK5-NEXT:    store i8* null, i8** [[TMP26]], align 8
1463 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
1464 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
1465 // CHECK5-NEXT:    store i64 [[TMP6]], i64* [[TMP28]], align 8
1466 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
1467 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
1468 // CHECK5-NEXT:    store i64 [[TMP6]], i64* [[TMP30]], align 8
1469 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
1470 // CHECK5-NEXT:    store i8* null, i8** [[TMP31]], align 8
1471 // CHECK5-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1472 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1473 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 2)
1474 // CHECK5-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 5, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1475 // CHECK5-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
1476 // CHECK5-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1477 // CHECK5:       omp_offload.failed:
1478 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106([2 x i32]* [[VEC]], i64 [[TMP3]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[TMP4]], i64 [[TMP6]]) #[[ATTR4:[0-9]+]]
1479 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1480 // CHECK5:       omp_offload.cont:
1481 // CHECK5-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
1482 // CHECK5-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1483 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1484 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1485 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1486 // CHECK5:       arraydestroy.body:
1487 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP36]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1488 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1489 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1490 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1491 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE3:%.*]], label [[ARRAYDESTROY_BODY]]
1492 // CHECK5:       arraydestroy.done3:
1493 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1494 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[RETVAL]], align 4
1495 // CHECK5-NEXT:    ret i32 [[TMP37]]
1496 //
1497 //
1498 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1499 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1500 // CHECK5-NEXT:  entry:
1501 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1502 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1503 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1504 // CHECK5-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1505 // CHECK5-NEXT:    ret void
1506 //
1507 //
1508 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1509 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1510 // CHECK5-NEXT:  entry:
1511 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1512 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1513 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1514 // CHECK5-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1515 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1516 // CHECK5-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1517 // CHECK5-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1518 // CHECK5-NEXT:    ret void
1519 //
1520 //
1521 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106
1522 // CHECK5-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3:[0-9]+]] {
1523 // CHECK5-NEXT:  entry:
1524 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1525 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
1526 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
1527 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
1528 // CHECK5-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
1529 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
1530 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
1531 // CHECK5-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
1532 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1533 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
1534 // CHECK5-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1535 // CHECK5-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
1536 // CHECK5-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
1537 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1538 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
1539 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1540 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
1541 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
1542 // CHECK5-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
1543 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
1544 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
1545 // CHECK5-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
1546 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
1547 // CHECK5-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
1548 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
1549 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
1550 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[CONV3]], align 4
1551 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
1552 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP5]], i64 [[TMP7]])
1553 // CHECK5-NEXT:    ret void
1554 //
1555 //
1556 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
1557 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3]] {
1558 // CHECK5-NEXT:  entry:
1559 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1560 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1561 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1562 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
1563 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
1564 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
1565 // CHECK5-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
1566 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
1567 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1568 // CHECK5-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
1569 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1570 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1571 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1572 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1573 // CHECK5-NEXT:    [[T_VAR3:%.*]] = alloca i32, align 4
1574 // CHECK5-NEXT:    [[VEC4:%.*]] = alloca [2 x i32], align 4
1575 // CHECK5-NEXT:    [[S_ARR5:%.*]] = alloca [2 x %struct.S], align 4
1576 // CHECK5-NEXT:    [[VAR6:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1577 // CHECK5-NEXT:    [[_TMP7:%.*]] = alloca %struct.S*, align 8
1578 // CHECK5-NEXT:    [[SVAR8:%.*]] = alloca i32, align 4
1579 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1580 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
1581 // CHECK5-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
1582 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1583 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1584 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1585 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
1586 // CHECK5-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1587 // CHECK5-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
1588 // CHECK5-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
1589 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1590 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
1591 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1592 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
1593 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
1594 // CHECK5-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
1595 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1596 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
1597 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1598 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1599 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR5]], i32 0, i32 0
1600 // CHECK5-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1601 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1602 // CHECK5:       arrayctor.loop:
1603 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1604 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1605 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
1606 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1607 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1608 // CHECK5:       arrayctor.cont:
1609 // CHECK5-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
1610 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR6]])
1611 // CHECK5-NEXT:    store %struct.S* [[VAR6]], %struct.S** [[_TMP7]], align 8
1612 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1613 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1614 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1615 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1616 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
1617 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1618 // CHECK5:       cond.true:
1619 // CHECK5-NEXT:    br label [[COND_END:%.*]]
1620 // CHECK5:       cond.false:
1621 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1622 // CHECK5-NEXT:    br label [[COND_END]]
1623 // CHECK5:       cond.end:
1624 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
1625 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1626 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1627 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
1628 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1629 // CHECK5:       omp.inner.for.cond:
1630 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1631 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1632 // CHECK5-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1633 // CHECK5-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1634 // CHECK5:       omp.inner.for.cond.cleanup:
1635 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1636 // CHECK5:       omp.inner.for.body:
1637 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1638 // CHECK5-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
1639 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1640 // CHECK5-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
1641 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR3]], align 4
1642 // CHECK5-NEXT:    [[CONV10:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
1643 // CHECK5-NEXT:    store i32 [[TMP15]], i32* [[CONV10]], align 4
1644 // CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
1645 // CHECK5-NEXT:    [[TMP17:%.*]] = load %struct.S*, %struct.S** [[_TMP7]], align 8
1646 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[SVAR8]], align 4
1647 // CHECK5-NEXT:    [[CONV11:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
1648 // CHECK5-NEXT:    store i32 [[TMP18]], i32* [[CONV11]], align 4
1649 // CHECK5-NEXT:    [[TMP19:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
1650 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP12]], i64 [[TMP14]], [2 x i32]* [[VEC4]], i64 [[TMP16]], [2 x %struct.S]* [[S_ARR5]], %struct.S* [[TMP17]], i64 [[TMP19]])
1651 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1652 // CHECK5:       omp.inner.for.inc:
1653 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1654 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1655 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
1656 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1657 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
1658 // CHECK5:       omp.inner.for.end:
1659 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1660 // CHECK5:       omp.loop.exit:
1661 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1662 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
1663 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
1664 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1665 // CHECK5-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
1666 // CHECK5-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1667 // CHECK5:       .omp.final.then:
1668 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
1669 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1670 // CHECK5:       .omp.final.done:
1671 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1672 // CHECK5-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
1673 // CHECK5-NEXT:    br i1 [[TMP27]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1674 // CHECK5:       .omp.lastprivate.then:
1675 // CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[T_VAR3]], align 4
1676 // CHECK5-NEXT:    store i32 [[TMP28]], i32* [[CONV]], align 8
1677 // CHECK5-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
1678 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8*
1679 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP29]], i8* align 4 [[TMP30]], i64 8, i1 false)
1680 // CHECK5-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
1681 // CHECK5-NEXT:    [[TMP31:%.*]] = bitcast [2 x %struct.S]* [[S_ARR5]] to %struct.S*
1682 // CHECK5-NEXT:    [[TMP32:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN12]], i64 2
1683 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN12]], [[TMP32]]
1684 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1685 // CHECK5:       omp.arraycpy.body:
1686 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP31]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1687 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN12]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1688 // CHECK5-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
1689 // CHECK5-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
1690 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP33]], i8* align 4 [[TMP34]], i64 4, i1 false)
1691 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1692 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1693 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP32]]
1694 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]]
1695 // CHECK5:       omp.arraycpy.done13:
1696 // CHECK5-NEXT:    [[TMP35:%.*]] = load %struct.S*, %struct.S** [[_TMP7]], align 8
1697 // CHECK5-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
1698 // CHECK5-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP35]] to i8*
1699 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP36]], i8* align 4 [[TMP37]], i64 4, i1 false)
1700 // CHECK5-NEXT:    [[TMP38:%.*]] = load i32, i32* [[SVAR8]], align 4
1701 // CHECK5-NEXT:    store i32 [[TMP38]], i32* [[CONV1]], align 8
1702 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1703 // CHECK5:       .omp.lastprivate.done:
1704 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR6]]) #[[ATTR4]]
1705 // CHECK5-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR5]], i32 0, i32 0
1706 // CHECK5-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN14]], i64 2
1707 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1708 // CHECK5:       arraydestroy.body:
1709 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP39]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1710 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1711 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1712 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]]
1713 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]]
1714 // CHECK5:       arraydestroy.done15:
1715 // CHECK5-NEXT:    ret void
1716 //
1717 //
1718 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
1719 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3]] {
1720 // CHECK5-NEXT:  entry:
1721 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1722 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1723 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1724 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1725 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1726 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
1727 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
1728 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
1729 // CHECK5-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
1730 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
1731 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1732 // CHECK5-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
1733 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1734 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1735 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1736 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1737 // CHECK5-NEXT:    [[T_VAR5:%.*]] = alloca i32, align 4
1738 // CHECK5-NEXT:    [[VEC6:%.*]] = alloca [2 x i32], align 4
1739 // CHECK5-NEXT:    [[S_ARR7:%.*]] = alloca [2 x %struct.S], align 4
1740 // CHECK5-NEXT:    [[VAR8:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1741 // CHECK5-NEXT:    [[_TMP9:%.*]] = alloca %struct.S*, align 8
1742 // CHECK5-NEXT:    [[SVAR10:%.*]] = alloca i32, align 4
1743 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
1744 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1745 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1746 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1747 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1748 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1749 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
1750 // CHECK5-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1751 // CHECK5-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
1752 // CHECK5-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
1753 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1754 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
1755 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1756 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
1757 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
1758 // CHECK5-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
1759 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1760 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
1761 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1762 // CHECK5-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
1763 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1764 // CHECK5-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP4]] to i32
1765 // CHECK5-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
1766 // CHECK5-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
1767 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1768 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1769 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i32 0, i32 0
1770 // CHECK5-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1771 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1772 // CHECK5:       arrayctor.loop:
1773 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1774 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1775 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
1776 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1777 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1778 // CHECK5:       arrayctor.cont:
1779 // CHECK5-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
1780 // CHECK5-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR8]])
1781 // CHECK5-NEXT:    store %struct.S* [[VAR8]], %struct.S** [[_TMP9]], align 8
1782 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1783 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1784 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1785 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1786 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
1787 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1788 // CHECK5:       cond.true:
1789 // CHECK5-NEXT:    br label [[COND_END:%.*]]
1790 // CHECK5:       cond.false:
1791 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1792 // CHECK5-NEXT:    br label [[COND_END]]
1793 // CHECK5:       cond.end:
1794 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
1795 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1796 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1797 // CHECK5-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
1798 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1799 // CHECK5:       omp.inner.for.cond:
1800 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1801 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1802 // CHECK5-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
1803 // CHECK5-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1804 // CHECK5:       omp.inner.for.cond.cleanup:
1805 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
1806 // CHECK5:       omp.inner.for.body:
1807 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1808 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
1809 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1810 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1811 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR5]], align 4
1812 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
1813 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
1814 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC6]], i64 0, i64 [[IDXPROM]]
1815 // CHECK5-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
1816 // CHECK5-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[_TMP9]], align 8
1817 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
1818 // CHECK5-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP17]] to i64
1819 // CHECK5-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i64 0, i64 [[IDXPROM12]]
1820 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX13]] to i8*
1821 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[TMP16]] to i8*
1822 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false)
1823 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1824 // CHECK5:       omp.body.continue:
1825 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1826 // CHECK5:       omp.inner.for.inc:
1827 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1828 // CHECK5-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP20]], 1
1829 // CHECK5-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
1830 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
1831 // CHECK5:       omp.inner.for.end:
1832 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1833 // CHECK5:       omp.loop.exit:
1834 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1835 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
1836 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
1837 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1838 // CHECK5-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
1839 // CHECK5-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1840 // CHECK5:       .omp.final.then:
1841 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
1842 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1843 // CHECK5:       .omp.final.done:
1844 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1845 // CHECK5-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1846 // CHECK5-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
1847 // CHECK5:       .omp.lastprivate.then:
1848 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR5]], align 4
1849 // CHECK5-NEXT:    store i32 [[TMP27]], i32* [[CONV]], align 8
1850 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
1851 // CHECK5-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC6]] to i8*
1852 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i64 8, i1 false)
1853 // CHECK5-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
1854 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S]* [[S_ARR7]] to %struct.S*
1855 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN15]], i64 2
1856 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN15]], [[TMP31]]
1857 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE16:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1858 // CHECK5:       omp.arraycpy.body:
1859 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1860 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN15]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1861 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
1862 // CHECK5-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
1863 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
1864 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1865 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1866 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
1867 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_BODY]]
1868 // CHECK5:       omp.arraycpy.done16:
1869 // CHECK5-NEXT:    [[TMP34:%.*]] = load %struct.S*, %struct.S** [[_TMP9]], align 8
1870 // CHECK5-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP5]] to i8*
1871 // CHECK5-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP34]] to i8*
1872 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
1873 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[SVAR10]], align 4
1874 // CHECK5-NEXT:    store i32 [[TMP37]], i32* [[CONV1]], align 8
1875 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
1876 // CHECK5:       .omp.lastprivate.done:
1877 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR8]]) #[[ATTR4]]
1878 // CHECK5-NEXT:    [[ARRAY_BEGIN17:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i32 0, i32 0
1879 // CHECK5-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN17]], i64 2
1880 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1881 // CHECK5:       arraydestroy.body:
1882 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP38]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1883 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1884 // CHECK5-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1885 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN17]]
1886 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE18:%.*]], label [[ARRAYDESTROY_BODY]]
1887 // CHECK5:       arraydestroy.done18:
1888 // CHECK5-NEXT:    ret void
1889 //
1890 //
1891 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1892 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1893 // CHECK5-NEXT:  entry:
1894 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1895 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1896 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1897 // CHECK5-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1898 // CHECK5-NEXT:    ret void
1899 //
1900 //
1901 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1902 // CHECK5-SAME: () #[[ATTR5:[0-9]+]] comdat {
1903 // CHECK5-NEXT:  entry:
1904 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1905 // CHECK5-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1906 // CHECK5-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1907 // CHECK5-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1908 // CHECK5-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1909 // CHECK5-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
1910 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
1911 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
1912 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
1913 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
1914 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
1915 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1916 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1917 // CHECK5-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1918 // CHECK5-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1919 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
1920 // CHECK5-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1921 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
1922 // CHECK5-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1923 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
1924 // CHECK5-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
1925 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
1926 // CHECK5-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8
1927 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
1928 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
1929 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
1930 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
1931 // CHECK5-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
1932 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1933 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [2 x i32]**
1934 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP6]], align 8
1935 // CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1936 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
1937 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 8
1938 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1939 // CHECK5-NEXT:    store i8* null, i8** [[TMP9]], align 8
1940 // CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1941 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
1942 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
1943 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1944 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1945 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
1946 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1947 // CHECK5-NEXT:    store i8* null, i8** [[TMP14]], align 8
1948 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1949 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [2 x %struct.S.0]**
1950 // CHECK5-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP16]], align 8
1951 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1952 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S.0]**
1953 // CHECK5-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP18]], align 8
1954 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1955 // CHECK5-NEXT:    store i8* null, i8** [[TMP19]], align 8
1956 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1957 // CHECK5-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S.0**
1958 // CHECK5-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP21]], align 8
1959 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1960 // CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S.0**
1961 // CHECK5-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP23]], align 8
1962 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1963 // CHECK5-NEXT:    store i8* null, i8** [[TMP24]], align 8
1964 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1965 // CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1966 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 2)
1967 // CHECK5-NEXT:    [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
1968 // CHECK5-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
1969 // CHECK5-NEXT:    br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1970 // CHECK5:       omp_offload.failed:
1971 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50([2 x i32]* [[VEC]], i64 [[TMP3]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP4]]) #[[ATTR4]]
1972 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1973 // CHECK5:       omp_offload.cont:
1974 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1975 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1976 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1977 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1978 // CHECK5:       arraydestroy.body:
1979 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1980 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1981 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1982 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1983 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1984 // CHECK5:       arraydestroy.done2:
1985 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1986 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
1987 // CHECK5-NEXT:    ret i32 [[TMP30]]
1988 //
1989 //
1990 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1991 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1992 // CHECK5-NEXT:  entry:
1993 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1994 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1995 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1996 // CHECK5-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1997 // CHECK5-NEXT:    store float 0.000000e+00, float* [[F]], align 4
1998 // CHECK5-NEXT:    ret void
1999 //
2000 //
2001 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
2002 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2003 // CHECK5-NEXT:  entry:
2004 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2005 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2006 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2007 // CHECK5-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2008 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2009 // CHECK5-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2010 // CHECK5-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2011 // CHECK5-NEXT:    store float [[TMP0]], float* [[F]], align 4
2012 // CHECK5-NEXT:    ret void
2013 //
2014 //
2015 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
2016 // CHECK5-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2017 // CHECK5-NEXT:  entry:
2018 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2019 // CHECK5-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2020 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2021 // CHECK5-NEXT:    ret void
2022 //
2023 //
2024 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
2025 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2026 // CHECK5-NEXT:  entry:
2027 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2028 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2029 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2030 // CHECK5-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
2031 // CHECK5-NEXT:    ret void
2032 //
2033 //
2034 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
2035 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2036 // CHECK5-NEXT:  entry:
2037 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2038 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2039 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2040 // CHECK5-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2041 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2042 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2043 // CHECK5-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
2044 // CHECK5-NEXT:    ret void
2045 //
2046 //
2047 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
2048 // CHECK5-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
2049 // CHECK5-NEXT:  entry:
2050 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2051 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2052 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
2053 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
2054 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
2055 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2056 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2057 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2058 // CHECK5-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2059 // CHECK5-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
2060 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2061 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2062 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2063 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
2064 // CHECK5-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
2065 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
2066 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2067 // CHECK5-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
2068 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2069 // CHECK5-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
2070 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
2071 // CHECK5-NEXT:    ret void
2072 //
2073 //
2074 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
2075 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
2076 // CHECK5-NEXT:  entry:
2077 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2078 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2079 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2080 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2081 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
2082 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
2083 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
2084 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2085 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2086 // CHECK5-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2087 // CHECK5-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2088 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2089 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2090 // CHECK5-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
2091 // CHECK5-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
2092 // CHECK5-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
2093 // CHECK5-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2094 // CHECK5-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 8
2095 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
2096 // CHECK5-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2097 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2098 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2099 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2100 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2101 // CHECK5-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2102 // CHECK5-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
2103 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2104 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2105 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2106 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
2107 // CHECK5-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
2108 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2109 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
2110 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2111 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2112 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
2113 // CHECK5-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
2114 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
2115 // CHECK5:       arrayctor.loop:
2116 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
2117 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
2118 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
2119 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
2120 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
2121 // CHECK5:       arrayctor.cont:
2122 // CHECK5-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
2123 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
2124 // CHECK5-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 8
2125 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2126 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2127 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2128 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2129 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
2130 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2131 // CHECK5:       cond.true:
2132 // CHECK5-NEXT:    br label [[COND_END:%.*]]
2133 // CHECK5:       cond.false:
2134 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2135 // CHECK5-NEXT:    br label [[COND_END]]
2136 // CHECK5:       cond.end:
2137 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
2138 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2139 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2140 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
2141 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2142 // CHECK5:       omp.inner.for.cond:
2143 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2144 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2145 // CHECK5-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2146 // CHECK5-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
2147 // CHECK5:       omp.inner.for.cond.cleanup:
2148 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
2149 // CHECK5:       omp.inner.for.body:
2150 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2151 // CHECK5-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
2152 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2153 // CHECK5-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
2154 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
2155 // CHECK5-NEXT:    [[CONV8:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2156 // CHECK5-NEXT:    store i32 [[TMP15]], i32* [[CONV8]], align 4
2157 // CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2158 // CHECK5-NEXT:    [[TMP17:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
2159 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP12]], i64 [[TMP14]], [2 x i32]* [[VEC3]], i64 [[TMP16]], [2 x %struct.S.0]* [[S_ARR4]], %struct.S.0* [[TMP17]])
2160 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2161 // CHECK5:       omp.inner.for.inc:
2162 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2163 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2164 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
2165 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2166 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
2167 // CHECK5:       omp.inner.for.end:
2168 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2169 // CHECK5:       omp.loop.exit:
2170 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2171 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
2172 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
2173 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2174 // CHECK5-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
2175 // CHECK5-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2176 // CHECK5:       .omp.final.then:
2177 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
2178 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2179 // CHECK5:       .omp.final.done:
2180 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2181 // CHECK5-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
2182 // CHECK5-NEXT:    br i1 [[TMP25]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
2183 // CHECK5:       .omp.lastprivate.then:
2184 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[T_VAR2]], align 4
2185 // CHECK5-NEXT:    store i32 [[TMP26]], i32* [[CONV]], align 8
2186 // CHECK5-NEXT:    [[TMP27:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
2187 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
2188 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP27]], i8* align 4 [[TMP28]], i64 8, i1 false)
2189 // CHECK5-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
2190 // CHECK5-NEXT:    [[TMP29:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
2191 // CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i64 2
2192 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN9]], [[TMP30]]
2193 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2194 // CHECK5:       omp.arraycpy.body:
2195 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2196 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN9]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2197 // CHECK5-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
2198 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
2199 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
2200 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2201 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2202 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP30]]
2203 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
2204 // CHECK5:       omp.arraycpy.done10:
2205 // CHECK5-NEXT:    [[TMP33:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
2206 // CHECK5-NEXT:    [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
2207 // CHECK5-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP33]] to i8*
2208 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
2209 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
2210 // CHECK5:       .omp.lastprivate.done:
2211 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
2212 // CHECK5-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
2213 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN11]], i64 2
2214 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2215 // CHECK5:       arraydestroy.body:
2216 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP36]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2217 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2218 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2219 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
2220 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
2221 // CHECK5:       arraydestroy.done12:
2222 // CHECK5-NEXT:    ret void
2223 //
2224 //
2225 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
2226 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
2227 // CHECK5-NEXT:  entry:
2228 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2229 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2230 // CHECK5-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2231 // CHECK5-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2232 // CHECK5-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2233 // CHECK5-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2234 // CHECK5-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
2235 // CHECK5-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
2236 // CHECK5-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
2237 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2238 // CHECK5-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2239 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2240 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2241 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2242 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2243 // CHECK5-NEXT:    [[T_VAR4:%.*]] = alloca i32, align 4
2244 // CHECK5-NEXT:    [[VEC5:%.*]] = alloca [2 x i32], align 4
2245 // CHECK5-NEXT:    [[S_ARR6:%.*]] = alloca [2 x %struct.S.0], align 4
2246 // CHECK5-NEXT:    [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2247 // CHECK5-NEXT:    [[_TMP8:%.*]] = alloca %struct.S.0*, align 8
2248 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
2249 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2250 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2251 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2252 // CHECK5-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2253 // CHECK5-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2254 // CHECK5-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2255 // CHECK5-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2256 // CHECK5-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
2257 // CHECK5-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2258 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2259 // CHECK5-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2260 // CHECK5-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
2261 // CHECK5-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
2262 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2263 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2264 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2265 // CHECK5-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP3]] to i32
2266 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2267 // CHECK5-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP4]] to i32
2268 // CHECK5-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
2269 // CHECK5-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2270 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2271 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2272 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i32 0, i32 0
2273 // CHECK5-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
2274 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
2275 // CHECK5:       arrayctor.loop:
2276 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
2277 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
2278 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
2279 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
2280 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
2281 // CHECK5:       arrayctor.cont:
2282 // CHECK5-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
2283 // CHECK5-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR7]])
2284 // CHECK5-NEXT:    store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP8]], align 8
2285 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2286 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2287 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2288 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2289 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
2290 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2291 // CHECK5:       cond.true:
2292 // CHECK5-NEXT:    br label [[COND_END:%.*]]
2293 // CHECK5:       cond.false:
2294 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2295 // CHECK5-NEXT:    br label [[COND_END]]
2296 // CHECK5:       cond.end:
2297 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
2298 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2299 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2300 // CHECK5-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
2301 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2302 // CHECK5:       omp.inner.for.cond:
2303 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2304 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2305 // CHECK5-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
2306 // CHECK5-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
2307 // CHECK5:       omp.inner.for.cond.cleanup:
2308 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
2309 // CHECK5:       omp.inner.for.body:
2310 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2311 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
2312 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2313 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2314 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR4]], align 4
2315 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
2316 // CHECK5-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
2317 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC5]], i64 0, i64 [[IDXPROM]]
2318 // CHECK5-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
2319 // CHECK5-NEXT:    [[TMP16:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP8]], align 8
2320 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
2321 // CHECK5-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP17]] to i64
2322 // CHECK5-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i64 0, i64 [[IDXPROM10]]
2323 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast %struct.S.0* [[ARRAYIDX11]] to i8*
2324 // CHECK5-NEXT:    [[TMP19:%.*]] = bitcast %struct.S.0* [[TMP16]] to i8*
2325 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false)
2326 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2327 // CHECK5:       omp.body.continue:
2328 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2329 // CHECK5:       omp.inner.for.inc:
2330 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2331 // CHECK5-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP20]], 1
2332 // CHECK5-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
2333 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
2334 // CHECK5:       omp.inner.for.end:
2335 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2336 // CHECK5:       omp.loop.exit:
2337 // CHECK5-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2338 // CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
2339 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
2340 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2341 // CHECK5-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
2342 // CHECK5-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2343 // CHECK5:       .omp.final.then:
2344 // CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
2345 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2346 // CHECK5:       .omp.final.done:
2347 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2348 // CHECK5-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2349 // CHECK5-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
2350 // CHECK5:       .omp.lastprivate.then:
2351 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR4]], align 4
2352 // CHECK5-NEXT:    store i32 [[TMP27]], i32* [[CONV]], align 8
2353 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
2354 // CHECK5-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC5]] to i8*
2355 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i64 8, i1 false)
2356 // CHECK5-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
2357 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR6]] to %struct.S.0*
2358 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN13]], i64 2
2359 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN13]], [[TMP31]]
2360 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2361 // CHECK5:       omp.arraycpy.body:
2362 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2363 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN13]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2364 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
2365 // CHECK5-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
2366 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
2367 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2368 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2369 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
2370 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY]]
2371 // CHECK5:       omp.arraycpy.done14:
2372 // CHECK5-NEXT:    [[TMP34:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP8]], align 8
2373 // CHECK5-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP5]] to i8*
2374 // CHECK5-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP34]] to i8*
2375 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
2376 // CHECK5-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
2377 // CHECK5:       .omp.lastprivate.done:
2378 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR7]]) #[[ATTR4]]
2379 // CHECK5-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i32 0, i32 0
2380 // CHECK5-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN15]], i64 2
2381 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2382 // CHECK5:       arraydestroy.body:
2383 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2384 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2385 // CHECK5-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2386 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN15]]
2387 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE16:%.*]], label [[ARRAYDESTROY_BODY]]
2388 // CHECK5:       arraydestroy.done16:
2389 // CHECK5-NEXT:    ret void
2390 //
2391 //
2392 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
2393 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2394 // CHECK5-NEXT:  entry:
2395 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2396 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2397 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2398 // CHECK5-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2399 // CHECK5-NEXT:    ret void
2400 //
2401 //
2402 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2403 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2404 // CHECK5-NEXT:  entry:
2405 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2406 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2407 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2408 // CHECK5-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2409 // CHECK5-NEXT:    store i32 0, i32* [[F]], align 4
2410 // CHECK5-NEXT:    ret void
2411 //
2412 //
2413 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2414 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2415 // CHECK5-NEXT:  entry:
2416 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2417 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2418 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2419 // CHECK5-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2420 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2421 // CHECK5-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2422 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2423 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2424 // CHECK5-NEXT:    ret void
2425 //
2426 //
2427 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2428 // CHECK5-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2429 // CHECK5-NEXT:  entry:
2430 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2431 // CHECK5-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2432 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2433 // CHECK5-NEXT:    ret void
2434 //
2435 //
2436 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2437 // CHECK5-SAME: () #[[ATTR6:[0-9]+]] {
2438 // CHECK5-NEXT:  entry:
2439 // CHECK5-NEXT:    call void @__tgt_register_requires(i64 1)
2440 // CHECK5-NEXT:    ret void
2441 //
2442 //
2443 // CHECK6-LABEL: define {{[^@]+}}@main
2444 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
2445 // CHECK6-NEXT:  entry:
2446 // CHECK6-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2447 // CHECK6-NEXT:    [[G:%.*]] = alloca double, align 8
2448 // CHECK6-NEXT:    [[G1:%.*]] = alloca double*, align 8
2449 // CHECK6-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
2450 // CHECK6-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
2451 // CHECK6-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
2452 // CHECK6-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
2453 // CHECK6-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 8
2454 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
2455 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2456 // CHECK6-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
2457 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
2458 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
2459 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
2460 // CHECK6-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
2461 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2462 // CHECK6-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2463 // CHECK6-NEXT:    store double* [[G]], double** [[G1]], align 8
2464 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
2465 // CHECK6-NEXT:    store i32 0, i32* [[T_VAR]], align 4
2466 // CHECK6-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2467 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
2468 // CHECK6-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
2469 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
2470 // CHECK6-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
2471 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
2472 // CHECK6-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 8
2473 // CHECK6-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
2474 // CHECK6-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 8
2475 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
2476 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2477 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
2478 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2479 // CHECK6-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
2480 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* @_ZZ4mainE4svar, align 4
2481 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
2482 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[CONV1]], align 4
2483 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
2484 // CHECK6-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2485 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
2486 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 8
2487 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2488 // CHECK6-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [2 x i32]**
2489 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP10]], align 8
2490 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
2491 // CHECK6-NEXT:    store i8* null, i8** [[TMP11]], align 8
2492 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2493 // CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
2494 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
2495 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2496 // CHECK6-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
2497 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
2498 // CHECK6-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
2499 // CHECK6-NEXT:    store i8* null, i8** [[TMP16]], align 8
2500 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
2501 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S]**
2502 // CHECK6-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP18]], align 8
2503 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
2504 // CHECK6-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [2 x %struct.S]**
2505 // CHECK6-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP20]], align 8
2506 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
2507 // CHECK6-NEXT:    store i8* null, i8** [[TMP21]], align 8
2508 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
2509 // CHECK6-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S**
2510 // CHECK6-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP23]], align 8
2511 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
2512 // CHECK6-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.S**
2513 // CHECK6-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP25]], align 8
2514 // CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
2515 // CHECK6-NEXT:    store i8* null, i8** [[TMP26]], align 8
2516 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
2517 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
2518 // CHECK6-NEXT:    store i64 [[TMP6]], i64* [[TMP28]], align 8
2519 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
2520 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
2521 // CHECK6-NEXT:    store i64 [[TMP6]], i64* [[TMP30]], align 8
2522 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
2523 // CHECK6-NEXT:    store i8* null, i8** [[TMP31]], align 8
2524 // CHECK6-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2525 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2526 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 2)
2527 // CHECK6-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 5, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
2528 // CHECK6-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2529 // CHECK6-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2530 // CHECK6:       omp_offload.failed:
2531 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106([2 x i32]* [[VEC]], i64 [[TMP3]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[TMP4]], i64 [[TMP6]]) #[[ATTR4:[0-9]+]]
2532 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2533 // CHECK6:       omp_offload.cont:
2534 // CHECK6-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
2535 // CHECK6-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
2536 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
2537 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
2538 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2539 // CHECK6:       arraydestroy.body:
2540 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP36]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2541 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2542 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2543 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
2544 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE3:%.*]], label [[ARRAYDESTROY_BODY]]
2545 // CHECK6:       arraydestroy.done3:
2546 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
2547 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[RETVAL]], align 4
2548 // CHECK6-NEXT:    ret i32 [[TMP37]]
2549 //
2550 //
2551 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
2552 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
2553 // CHECK6-NEXT:  entry:
2554 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2555 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2556 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2557 // CHECK6-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
2558 // CHECK6-NEXT:    ret void
2559 //
2560 //
2561 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
2562 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2563 // CHECK6-NEXT:  entry:
2564 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2565 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2566 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2567 // CHECK6-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2568 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2569 // CHECK6-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2570 // CHECK6-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
2571 // CHECK6-NEXT:    ret void
2572 //
2573 //
2574 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106
2575 // CHECK6-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3:[0-9]+]] {
2576 // CHECK6-NEXT:  entry:
2577 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2578 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2579 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
2580 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
2581 // CHECK6-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
2582 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
2583 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2584 // CHECK6-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
2585 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2586 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2587 // CHECK6-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2588 // CHECK6-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
2589 // CHECK6-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
2590 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2591 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2592 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2593 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
2594 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
2595 // CHECK6-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
2596 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
2597 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2598 // CHECK6-NEXT:    store i32 [[TMP3]], i32* [[CONV2]], align 4
2599 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2600 // CHECK6-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
2601 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
2602 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
2603 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[CONV3]], align 4
2604 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
2605 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP5]], i64 [[TMP7]])
2606 // CHECK6-NEXT:    ret void
2607 //
2608 //
2609 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
2610 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3]] {
2611 // CHECK6-NEXT:  entry:
2612 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2613 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2614 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2615 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2616 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
2617 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
2618 // CHECK6-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
2619 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
2620 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2621 // CHECK6-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
2622 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2623 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2624 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2625 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2626 // CHECK6-NEXT:    [[T_VAR3:%.*]] = alloca i32, align 4
2627 // CHECK6-NEXT:    [[VEC4:%.*]] = alloca [2 x i32], align 4
2628 // CHECK6-NEXT:    [[S_ARR5:%.*]] = alloca [2 x %struct.S], align 4
2629 // CHECK6-NEXT:    [[VAR6:%.*]] = alloca [[STRUCT_S:%.*]], align 4
2630 // CHECK6-NEXT:    [[_TMP7:%.*]] = alloca %struct.S*, align 8
2631 // CHECK6-NEXT:    [[SVAR8:%.*]] = alloca i32, align 4
2632 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2633 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2634 // CHECK6-NEXT:    [[SVAR_CASTED:%.*]] = alloca i64, align 8
2635 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2636 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2637 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2638 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2639 // CHECK6-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2640 // CHECK6-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
2641 // CHECK6-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
2642 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2643 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2644 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2645 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
2646 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
2647 // CHECK6-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
2648 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2649 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
2650 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2651 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2652 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR5]], i32 0, i32 0
2653 // CHECK6-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
2654 // CHECK6-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
2655 // CHECK6:       arrayctor.loop:
2656 // CHECK6-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
2657 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
2658 // CHECK6-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
2659 // CHECK6-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
2660 // CHECK6-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
2661 // CHECK6:       arrayctor.cont:
2662 // CHECK6-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
2663 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR6]])
2664 // CHECK6-NEXT:    store %struct.S* [[VAR6]], %struct.S** [[_TMP7]], align 8
2665 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2666 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2667 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2668 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2669 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
2670 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2671 // CHECK6:       cond.true:
2672 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2673 // CHECK6:       cond.false:
2674 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2675 // CHECK6-NEXT:    br label [[COND_END]]
2676 // CHECK6:       cond.end:
2677 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
2678 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2679 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2680 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
2681 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2682 // CHECK6:       omp.inner.for.cond:
2683 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2684 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2685 // CHECK6-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2686 // CHECK6-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
2687 // CHECK6:       omp.inner.for.cond.cleanup:
2688 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
2689 // CHECK6:       omp.inner.for.body:
2690 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2691 // CHECK6-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
2692 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2693 // CHECK6-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
2694 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR3]], align 4
2695 // CHECK6-NEXT:    [[CONV10:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2696 // CHECK6-NEXT:    store i32 [[TMP15]], i32* [[CONV10]], align 4
2697 // CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2698 // CHECK6-NEXT:    [[TMP17:%.*]] = load %struct.S*, %struct.S** [[_TMP7]], align 8
2699 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[SVAR8]], align 4
2700 // CHECK6-NEXT:    [[CONV11:%.*]] = bitcast i64* [[SVAR_CASTED]] to i32*
2701 // CHECK6-NEXT:    store i32 [[TMP18]], i32* [[CONV11]], align 4
2702 // CHECK6-NEXT:    [[TMP19:%.*]] = load i64, i64* [[SVAR_CASTED]], align 8
2703 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP12]], i64 [[TMP14]], [2 x i32]* [[VEC4]], i64 [[TMP16]], [2 x %struct.S]* [[S_ARR5]], %struct.S* [[TMP17]], i64 [[TMP19]])
2704 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2705 // CHECK6:       omp.inner.for.inc:
2706 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2707 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2708 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2709 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2710 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
2711 // CHECK6:       omp.inner.for.end:
2712 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2713 // CHECK6:       omp.loop.exit:
2714 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2715 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
2716 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
2717 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2718 // CHECK6-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
2719 // CHECK6-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2720 // CHECK6:       .omp.final.then:
2721 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
2722 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2723 // CHECK6:       .omp.final.done:
2724 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2725 // CHECK6-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
2726 // CHECK6-NEXT:    br i1 [[TMP27]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
2727 // CHECK6:       .omp.lastprivate.then:
2728 // CHECK6-NEXT:    [[TMP28:%.*]] = load i32, i32* [[T_VAR3]], align 4
2729 // CHECK6-NEXT:    store i32 [[TMP28]], i32* [[CONV]], align 8
2730 // CHECK6-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
2731 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8*
2732 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP29]], i8* align 4 [[TMP30]], i64 8, i1 false)
2733 // CHECK6-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
2734 // CHECK6-NEXT:    [[TMP31:%.*]] = bitcast [2 x %struct.S]* [[S_ARR5]] to %struct.S*
2735 // CHECK6-NEXT:    [[TMP32:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN12]], i64 2
2736 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN12]], [[TMP32]]
2737 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2738 // CHECK6:       omp.arraycpy.body:
2739 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP31]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2740 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN12]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2741 // CHECK6-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
2742 // CHECK6-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
2743 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP33]], i8* align 4 [[TMP34]], i64 4, i1 false)
2744 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2745 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2746 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP32]]
2747 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]]
2748 // CHECK6:       omp.arraycpy.done13:
2749 // CHECK6-NEXT:    [[TMP35:%.*]] = load %struct.S*, %struct.S** [[_TMP7]], align 8
2750 // CHECK6-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
2751 // CHECK6-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP35]] to i8*
2752 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP36]], i8* align 4 [[TMP37]], i64 4, i1 false)
2753 // CHECK6-NEXT:    [[TMP38:%.*]] = load i32, i32* [[SVAR8]], align 4
2754 // CHECK6-NEXT:    store i32 [[TMP38]], i32* [[CONV1]], align 8
2755 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
2756 // CHECK6:       .omp.lastprivate.done:
2757 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR6]]) #[[ATTR4]]
2758 // CHECK6-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR5]], i32 0, i32 0
2759 // CHECK6-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN14]], i64 2
2760 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2761 // CHECK6:       arraydestroy.body:
2762 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP39]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2763 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2764 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2765 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]]
2766 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]]
2767 // CHECK6:       arraydestroy.done15:
2768 // CHECK6-NEXT:    ret void
2769 //
2770 //
2771 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
2772 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SVAR:%.*]]) #[[ATTR3]] {
2773 // CHECK6-NEXT:  entry:
2774 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2775 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2776 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2777 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2778 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2779 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
2780 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
2781 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
2782 // CHECK6-NEXT:    [[SVAR_ADDR:%.*]] = alloca i64, align 8
2783 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
2784 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2785 // CHECK6-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
2786 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2787 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2788 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2789 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2790 // CHECK6-NEXT:    [[T_VAR5:%.*]] = alloca i32, align 4
2791 // CHECK6-NEXT:    [[VEC6:%.*]] = alloca [2 x i32], align 4
2792 // CHECK6-NEXT:    [[S_ARR7:%.*]] = alloca [2 x %struct.S], align 4
2793 // CHECK6-NEXT:    [[VAR8:%.*]] = alloca [[STRUCT_S:%.*]], align 4
2794 // CHECK6-NEXT:    [[_TMP9:%.*]] = alloca %struct.S*, align 8
2795 // CHECK6-NEXT:    [[SVAR10:%.*]] = alloca i32, align 4
2796 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
2797 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2798 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2799 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2800 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2801 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2802 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
2803 // CHECK6-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2804 // CHECK6-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
2805 // CHECK6-NEXT:    store i64 [[SVAR]], i64* [[SVAR_ADDR]], align 8
2806 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2807 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
2808 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2809 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
2810 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[SVAR_ADDR]] to i32*
2811 // CHECK6-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 8
2812 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2813 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
2814 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2815 // CHECK6-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
2816 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2817 // CHECK6-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP4]] to i32
2818 // CHECK6-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
2819 // CHECK6-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
2820 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2821 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2822 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i32 0, i32 0
2823 // CHECK6-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
2824 // CHECK6-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
2825 // CHECK6:       arrayctor.loop:
2826 // CHECK6-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
2827 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
2828 // CHECK6-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
2829 // CHECK6-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
2830 // CHECK6-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
2831 // CHECK6:       arrayctor.cont:
2832 // CHECK6-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
2833 // CHECK6-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR8]])
2834 // CHECK6-NEXT:    store %struct.S* [[VAR8]], %struct.S** [[_TMP9]], align 8
2835 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2836 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2837 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2838 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2839 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
2840 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2841 // CHECK6:       cond.true:
2842 // CHECK6-NEXT:    br label [[COND_END:%.*]]
2843 // CHECK6:       cond.false:
2844 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2845 // CHECK6-NEXT:    br label [[COND_END]]
2846 // CHECK6:       cond.end:
2847 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
2848 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2849 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2850 // CHECK6-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
2851 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2852 // CHECK6:       omp.inner.for.cond:
2853 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2854 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2855 // CHECK6-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
2856 // CHECK6-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
2857 // CHECK6:       omp.inner.for.cond.cleanup:
2858 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
2859 // CHECK6:       omp.inner.for.body:
2860 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2861 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
2862 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2863 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
2864 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR5]], align 4
2865 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
2866 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
2867 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC6]], i64 0, i64 [[IDXPROM]]
2868 // CHECK6-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
2869 // CHECK6-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[_TMP9]], align 8
2870 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
2871 // CHECK6-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP17]] to i64
2872 // CHECK6-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i64 0, i64 [[IDXPROM12]]
2873 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX13]] to i8*
2874 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[TMP16]] to i8*
2875 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false)
2876 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2877 // CHECK6:       omp.body.continue:
2878 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2879 // CHECK6:       omp.inner.for.inc:
2880 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2881 // CHECK6-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP20]], 1
2882 // CHECK6-NEXT:    store i32 [[ADD14]], i32* [[DOTOMP_IV]], align 4
2883 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
2884 // CHECK6:       omp.inner.for.end:
2885 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2886 // CHECK6:       omp.loop.exit:
2887 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2888 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
2889 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
2890 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2891 // CHECK6-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
2892 // CHECK6-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2893 // CHECK6:       .omp.final.then:
2894 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
2895 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2896 // CHECK6:       .omp.final.done:
2897 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2898 // CHECK6-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2899 // CHECK6-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
2900 // CHECK6:       .omp.lastprivate.then:
2901 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR5]], align 4
2902 // CHECK6-NEXT:    store i32 [[TMP27]], i32* [[CONV]], align 8
2903 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
2904 // CHECK6-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC6]] to i8*
2905 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i64 8, i1 false)
2906 // CHECK6-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
2907 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S]* [[S_ARR7]] to %struct.S*
2908 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN15]], i64 2
2909 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN15]], [[TMP31]]
2910 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE16:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2911 // CHECK6:       omp.arraycpy.body:
2912 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2913 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN15]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2914 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
2915 // CHECK6-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
2916 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
2917 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2918 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2919 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
2920 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_BODY]]
2921 // CHECK6:       omp.arraycpy.done16:
2922 // CHECK6-NEXT:    [[TMP34:%.*]] = load %struct.S*, %struct.S** [[_TMP9]], align 8
2923 // CHECK6-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP5]] to i8*
2924 // CHECK6-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP34]] to i8*
2925 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
2926 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[SVAR10]], align 4
2927 // CHECK6-NEXT:    store i32 [[TMP37]], i32* [[CONV1]], align 8
2928 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
2929 // CHECK6:       .omp.lastprivate.done:
2930 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR8]]) #[[ATTR4]]
2931 // CHECK6-NEXT:    [[ARRAY_BEGIN17:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR7]], i32 0, i32 0
2932 // CHECK6-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN17]], i64 2
2933 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2934 // CHECK6:       arraydestroy.body:
2935 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP38]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2936 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2937 // CHECK6-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2938 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN17]]
2939 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE18:%.*]], label [[ARRAYDESTROY_BODY]]
2940 // CHECK6:       arraydestroy.done18:
2941 // CHECK6-NEXT:    ret void
2942 //
2943 //
2944 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
2945 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2946 // CHECK6-NEXT:  entry:
2947 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2948 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2949 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2950 // CHECK6-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2951 // CHECK6-NEXT:    ret void
2952 //
2953 //
2954 // CHECK6-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2955 // CHECK6-SAME: () #[[ATTR5:[0-9]+]] comdat {
2956 // CHECK6-NEXT:  entry:
2957 // CHECK6-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2958 // CHECK6-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2959 // CHECK6-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
2960 // CHECK6-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
2961 // CHECK6-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
2962 // CHECK6-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
2963 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
2964 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
2965 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
2966 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
2967 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
2968 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
2969 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
2970 // CHECK6-NEXT:    store i32 0, i32* [[T_VAR]], align 4
2971 // CHECK6-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2972 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
2973 // CHECK6-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
2974 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
2975 // CHECK6-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
2976 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
2977 // CHECK6-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
2978 // CHECK6-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
2979 // CHECK6-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8
2980 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
2981 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
2982 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[CONV]], align 4
2983 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
2984 // CHECK6-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
2985 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2986 // CHECK6-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [2 x i32]**
2987 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP6]], align 8
2988 // CHECK6-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2989 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
2990 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 8
2991 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
2992 // CHECK6-NEXT:    store i8* null, i8** [[TMP9]], align 8
2993 // CHECK6-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2994 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
2995 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
2996 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2997 // CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
2998 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
2999 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3000 // CHECK6-NEXT:    store i8* null, i8** [[TMP14]], align 8
3001 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3002 // CHECK6-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [2 x %struct.S.0]**
3003 // CHECK6-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP16]], align 8
3004 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3005 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S.0]**
3006 // CHECK6-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP18]], align 8
3007 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3008 // CHECK6-NEXT:    store i8* null, i8** [[TMP19]], align 8
3009 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
3010 // CHECK6-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S.0**
3011 // CHECK6-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP21]], align 8
3012 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
3013 // CHECK6-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S.0**
3014 // CHECK6-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP23]], align 8
3015 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
3016 // CHECK6-NEXT:    store i8* null, i8** [[TMP24]], align 8
3017 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3018 // CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3019 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 2)
3020 // CHECK6-NEXT:    [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3021 // CHECK6-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
3022 // CHECK6-NEXT:    br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3023 // CHECK6:       omp_offload.failed:
3024 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50([2 x i32]* [[VEC]], i64 [[TMP3]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP4]]) #[[ATTR4]]
3025 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3026 // CHECK6:       omp_offload.cont:
3027 // CHECK6-NEXT:    store i32 0, i32* [[RETVAL]], align 4
3028 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
3029 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
3030 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3031 // CHECK6:       arraydestroy.body:
3032 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3033 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
3034 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3035 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
3036 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
3037 // CHECK6:       arraydestroy.done2:
3038 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
3039 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
3040 // CHECK6-NEXT:    ret i32 [[TMP30]]
3041 //
3042 //
3043 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
3044 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3045 // CHECK6-NEXT:  entry:
3046 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
3047 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
3048 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
3049 // CHECK6-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
3050 // CHECK6-NEXT:    store float 0.000000e+00, float* [[F]], align 4
3051 // CHECK6-NEXT:    ret void
3052 //
3053 //
3054 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
3055 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3056 // CHECK6-NEXT:  entry:
3057 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
3058 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
3059 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
3060 // CHECK6-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
3061 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
3062 // CHECK6-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
3063 // CHECK6-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
3064 // CHECK6-NEXT:    store float [[TMP0]], float* [[F]], align 4
3065 // CHECK6-NEXT:    ret void
3066 //
3067 //
3068 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
3069 // CHECK6-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3070 // CHECK6-NEXT:  entry:
3071 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
3072 // CHECK6-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
3073 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
3074 // CHECK6-NEXT:    ret void
3075 //
3076 //
3077 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
3078 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3079 // CHECK6-NEXT:  entry:
3080 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3081 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3082 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3083 // CHECK6-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
3084 // CHECK6-NEXT:    ret void
3085 //
3086 //
3087 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
3088 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3089 // CHECK6-NEXT:  entry:
3090 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3091 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3092 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3093 // CHECK6-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3094 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3095 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
3096 // CHECK6-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
3097 // CHECK6-NEXT:    ret void
3098 //
3099 //
3100 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
3101 // CHECK6-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
3102 // CHECK6-NEXT:  entry:
3103 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
3104 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
3105 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
3106 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
3107 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
3108 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
3109 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
3110 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
3111 // CHECK6-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3112 // CHECK6-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
3113 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
3114 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
3115 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3116 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
3117 // CHECK6-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
3118 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
3119 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
3120 // CHECK6-NEXT:    store i32 [[TMP3]], i32* [[CONV1]], align 4
3121 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
3122 // CHECK6-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
3123 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
3124 // CHECK6-NEXT:    ret void
3125 //
3126 //
3127 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
3128 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
3129 // CHECK6-NEXT:  entry:
3130 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3131 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3132 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
3133 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
3134 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
3135 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
3136 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
3137 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3138 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3139 // CHECK6-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3140 // CHECK6-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3141 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3142 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3143 // CHECK6-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
3144 // CHECK6-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
3145 // CHECK6-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
3146 // CHECK6-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
3147 // CHECK6-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 8
3148 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
3149 // CHECK6-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
3150 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3151 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3152 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
3153 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
3154 // CHECK6-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3155 // CHECK6-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
3156 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
3157 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
3158 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3159 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
3160 // CHECK6-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
3161 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3162 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
3163 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3164 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3165 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
3166 // CHECK6-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
3167 // CHECK6-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
3168 // CHECK6:       arrayctor.loop:
3169 // CHECK6-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
3170 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
3171 // CHECK6-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
3172 // CHECK6-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
3173 // CHECK6-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
3174 // CHECK6:       arrayctor.cont:
3175 // CHECK6-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
3176 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
3177 // CHECK6-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 8
3178 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3179 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
3180 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3181 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3182 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
3183 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3184 // CHECK6:       cond.true:
3185 // CHECK6-NEXT:    br label [[COND_END:%.*]]
3186 // CHECK6:       cond.false:
3187 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3188 // CHECK6-NEXT:    br label [[COND_END]]
3189 // CHECK6:       cond.end:
3190 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
3191 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3192 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3193 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
3194 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3195 // CHECK6:       omp.inner.for.cond:
3196 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3197 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3198 // CHECK6-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3199 // CHECK6-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
3200 // CHECK6:       omp.inner.for.cond.cleanup:
3201 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
3202 // CHECK6:       omp.inner.for.body:
3203 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3204 // CHECK6-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
3205 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3206 // CHECK6-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
3207 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
3208 // CHECK6-NEXT:    [[CONV8:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
3209 // CHECK6-NEXT:    store i32 [[TMP15]], i32* [[CONV8]], align 4
3210 // CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
3211 // CHECK6-NEXT:    [[TMP17:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
3212 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP12]], i64 [[TMP14]], [2 x i32]* [[VEC3]], i64 [[TMP16]], [2 x %struct.S.0]* [[S_ARR4]], %struct.S.0* [[TMP17]])
3213 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3214 // CHECK6:       omp.inner.for.inc:
3215 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3216 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3217 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
3218 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
3219 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
3220 // CHECK6:       omp.inner.for.end:
3221 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3222 // CHECK6:       omp.loop.exit:
3223 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3224 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
3225 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
3226 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3227 // CHECK6-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
3228 // CHECK6-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3229 // CHECK6:       .omp.final.then:
3230 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
3231 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3232 // CHECK6:       .omp.final.done:
3233 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3234 // CHECK6-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
3235 // CHECK6-NEXT:    br i1 [[TMP25]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3236 // CHECK6:       .omp.lastprivate.then:
3237 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32, i32* [[T_VAR2]], align 4
3238 // CHECK6-NEXT:    store i32 [[TMP26]], i32* [[CONV]], align 8
3239 // CHECK6-NEXT:    [[TMP27:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
3240 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
3241 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP27]], i8* align 4 [[TMP28]], i64 8, i1 false)
3242 // CHECK6-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
3243 // CHECK6-NEXT:    [[TMP29:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
3244 // CHECK6-NEXT:    [[TMP30:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i64 2
3245 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN9]], [[TMP30]]
3246 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3247 // CHECK6:       omp.arraycpy.body:
3248 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3249 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN9]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3250 // CHECK6-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
3251 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
3252 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
3253 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3254 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3255 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP30]]
3256 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
3257 // CHECK6:       omp.arraycpy.done10:
3258 // CHECK6-NEXT:    [[TMP33:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
3259 // CHECK6-NEXT:    [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
3260 // CHECK6-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP33]] to i8*
3261 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
3262 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3263 // CHECK6:       .omp.lastprivate.done:
3264 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
3265 // CHECK6-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
3266 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN11]], i64 2
3267 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3268 // CHECK6:       arraydestroy.body:
3269 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP36]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3270 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
3271 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3272 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
3273 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
3274 // CHECK6:       arraydestroy.done12:
3275 // CHECK6-NEXT:    ret void
3276 //
3277 //
3278 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
3279 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
3280 // CHECK6-NEXT:  entry:
3281 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3282 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3283 // CHECK6-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3284 // CHECK6-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3285 // CHECK6-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
3286 // CHECK6-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
3287 // CHECK6-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
3288 // CHECK6-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
3289 // CHECK6-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
3290 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3291 // CHECK6-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3292 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3293 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3294 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3295 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3296 // CHECK6-NEXT:    [[T_VAR4:%.*]] = alloca i32, align 4
3297 // CHECK6-NEXT:    [[VEC5:%.*]] = alloca [2 x i32], align 4
3298 // CHECK6-NEXT:    [[S_ARR6:%.*]] = alloca [2 x %struct.S.0], align 4
3299 // CHECK6-NEXT:    [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
3300 // CHECK6-NEXT:    [[_TMP8:%.*]] = alloca %struct.S.0*, align 8
3301 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
3302 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3303 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3304 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3305 // CHECK6-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3306 // CHECK6-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
3307 // CHECK6-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
3308 // CHECK6-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3309 // CHECK6-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
3310 // CHECK6-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
3311 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
3312 // CHECK6-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
3313 // CHECK6-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
3314 // CHECK6-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 8
3315 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3316 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
3317 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
3318 // CHECK6-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP3]] to i32
3319 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
3320 // CHECK6-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP4]] to i32
3321 // CHECK6-NEXT:    store i32 [[CONV2]], i32* [[DOTOMP_LB]], align 4
3322 // CHECK6-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
3323 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3324 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3325 // CHECK6-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i32 0, i32 0
3326 // CHECK6-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
3327 // CHECK6-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
3328 // CHECK6:       arrayctor.loop:
3329 // CHECK6-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
3330 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
3331 // CHECK6-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
3332 // CHECK6-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
3333 // CHECK6-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
3334 // CHECK6:       arrayctor.cont:
3335 // CHECK6-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
3336 // CHECK6-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR7]])
3337 // CHECK6-NEXT:    store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP8]], align 8
3338 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3339 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
3340 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3341 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3342 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
3343 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3344 // CHECK6:       cond.true:
3345 // CHECK6-NEXT:    br label [[COND_END:%.*]]
3346 // CHECK6:       cond.false:
3347 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3348 // CHECK6-NEXT:    br label [[COND_END]]
3349 // CHECK6:       cond.end:
3350 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
3351 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3352 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3353 // CHECK6-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
3354 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3355 // CHECK6:       omp.inner.for.cond:
3356 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3357 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3358 // CHECK6-NEXT:    [[CMP9:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
3359 // CHECK6-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
3360 // CHECK6:       omp.inner.for.cond.cleanup:
3361 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
3362 // CHECK6:       omp.inner.for.body:
3363 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3364 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
3365 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3366 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
3367 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR4]], align 4
3368 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
3369 // CHECK6-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
3370 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC5]], i64 0, i64 [[IDXPROM]]
3371 // CHECK6-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
3372 // CHECK6-NEXT:    [[TMP16:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP8]], align 8
3373 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
3374 // CHECK6-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP17]] to i64
3375 // CHECK6-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i64 0, i64 [[IDXPROM10]]
3376 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast %struct.S.0* [[ARRAYIDX11]] to i8*
3377 // CHECK6-NEXT:    [[TMP19:%.*]] = bitcast %struct.S.0* [[TMP16]] to i8*
3378 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false)
3379 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3380 // CHECK6:       omp.body.continue:
3381 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3382 // CHECK6:       omp.inner.for.inc:
3383 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3384 // CHECK6-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP20]], 1
3385 // CHECK6-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
3386 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
3387 // CHECK6:       omp.inner.for.end:
3388 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3389 // CHECK6:       omp.loop.exit:
3390 // CHECK6-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3391 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3392 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3393 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3394 // CHECK6-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3395 // CHECK6-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3396 // CHECK6:       .omp.final.then:
3397 // CHECK6-NEXT:    store i32 2, i32* [[I]], align 4
3398 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3399 // CHECK6:       .omp.final.done:
3400 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3401 // CHECK6-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3402 // CHECK6-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3403 // CHECK6:       .omp.lastprivate.then:
3404 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR4]], align 4
3405 // CHECK6-NEXT:    store i32 [[TMP27]], i32* [[CONV]], align 8
3406 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
3407 // CHECK6-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC5]] to i8*
3408 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i64 8, i1 false)
3409 // CHECK6-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
3410 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR6]] to %struct.S.0*
3411 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN13]], i64 2
3412 // CHECK6-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN13]], [[TMP31]]
3413 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3414 // CHECK6:       omp.arraycpy.body:
3415 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3416 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN13]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3417 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
3418 // CHECK6-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
3419 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
3420 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3421 // CHECK6-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3422 // CHECK6-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
3423 // CHECK6-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY]]
3424 // CHECK6:       omp.arraycpy.done14:
3425 // CHECK6-NEXT:    [[TMP34:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP8]], align 8
3426 // CHECK6-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP5]] to i8*
3427 // CHECK6-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP34]] to i8*
3428 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
3429 // CHECK6-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3430 // CHECK6:       .omp.lastprivate.done:
3431 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR7]]) #[[ATTR4]]
3432 // CHECK6-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR6]], i32 0, i32 0
3433 // CHECK6-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN15]], i64 2
3434 // CHECK6-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3435 // CHECK6:       arraydestroy.body:
3436 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3437 // CHECK6-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
3438 // CHECK6-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3439 // CHECK6-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN15]]
3440 // CHECK6-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE16:%.*]], label [[ARRAYDESTROY_BODY]]
3441 // CHECK6:       arraydestroy.done16:
3442 // CHECK6-NEXT:    ret void
3443 //
3444 //
3445 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
3446 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3447 // CHECK6-NEXT:  entry:
3448 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3449 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3450 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3451 // CHECK6-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
3452 // CHECK6-NEXT:    ret void
3453 //
3454 //
3455 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
3456 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3457 // CHECK6-NEXT:  entry:
3458 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3459 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3460 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3461 // CHECK6-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
3462 // CHECK6-NEXT:    store i32 0, i32* [[F]], align 4
3463 // CHECK6-NEXT:    ret void
3464 //
3465 //
3466 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
3467 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3468 // CHECK6-NEXT:  entry:
3469 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3470 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3471 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3472 // CHECK6-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3473 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3474 // CHECK6-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
3475 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
3476 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
3477 // CHECK6-NEXT:    ret void
3478 //
3479 //
3480 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
3481 // CHECK6-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3482 // CHECK6-NEXT:  entry:
3483 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
3484 // CHECK6-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
3485 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
3486 // CHECK6-NEXT:    ret void
3487 //
3488 //
3489 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3490 // CHECK6-SAME: () #[[ATTR6:[0-9]+]] {
3491 // CHECK6-NEXT:  entry:
3492 // CHECK6-NEXT:    call void @__tgt_register_requires(i64 1)
3493 // CHECK6-NEXT:    ret void
3494 //
3495 //
3496 // CHECK7-LABEL: define {{[^@]+}}@main
3497 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
3498 // CHECK7-NEXT:  entry:
3499 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
3500 // CHECK7-NEXT:    [[G:%.*]] = alloca double, align 8
3501 // CHECK7-NEXT:    [[G1:%.*]] = alloca double*, align 4
3502 // CHECK7-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
3503 // CHECK7-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
3504 // CHECK7-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
3505 // CHECK7-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
3506 // CHECK7-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 4
3507 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
3508 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
3509 // CHECK7-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
3510 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
3511 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
3512 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
3513 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3514 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3515 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
3516 // CHECK7-NEXT:    store double* [[G]], double** [[G1]], align 4
3517 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
3518 // CHECK7-NEXT:    store i32 0, i32* [[T_VAR]], align 4
3519 // CHECK7-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
3520 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i32 8, i1 false)
3521 // CHECK7-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
3522 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
3523 // CHECK7-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i32 1
3524 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
3525 // CHECK7-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 4
3526 // CHECK7-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
3527 // CHECK7-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 4
3528 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
3529 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[T_VAR_CASTED]], align 4
3530 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
3531 // CHECK7-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
3532 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* @_ZZ4mainE4svar, align 4
3533 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[SVAR_CASTED]], align 4
3534 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
3535 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3536 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
3537 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 4
3538 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3539 // CHECK7-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [2 x i32]**
3540 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP10]], align 4
3541 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3542 // CHECK7-NEXT:    store i8* null, i8** [[TMP11]], align 4
3543 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3544 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
3545 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
3546 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3547 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
3548 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
3549 // CHECK7-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3550 // CHECK7-NEXT:    store i8* null, i8** [[TMP16]], align 4
3551 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3552 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S]**
3553 // CHECK7-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP18]], align 4
3554 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3555 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [2 x %struct.S]**
3556 // CHECK7-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP20]], align 4
3557 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
3558 // CHECK7-NEXT:    store i8* null, i8** [[TMP21]], align 4
3559 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
3560 // CHECK7-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S**
3561 // CHECK7-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP23]], align 4
3562 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
3563 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.S**
3564 // CHECK7-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP25]], align 4
3565 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
3566 // CHECK7-NEXT:    store i8* null, i8** [[TMP26]], align 4
3567 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
3568 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
3569 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP28]], align 4
3570 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
3571 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
3572 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP30]], align 4
3573 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
3574 // CHECK7-NEXT:    store i8* null, i8** [[TMP31]], align 4
3575 // CHECK7-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3576 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3577 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 2)
3578 // CHECK7-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 5, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
3579 // CHECK7-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
3580 // CHECK7-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3581 // CHECK7:       omp_offload.failed:
3582 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106([2 x i32]* [[VEC]], i32 [[TMP3]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[TMP4]], i32 [[TMP6]]) #[[ATTR4:[0-9]+]]
3583 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3584 // CHECK7:       omp_offload.cont:
3585 // CHECK7-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
3586 // CHECK7-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
3587 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
3588 // CHECK7-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
3589 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3590 // CHECK7:       arraydestroy.body:
3591 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP36]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3592 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
3593 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3594 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
3595 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
3596 // CHECK7:       arraydestroy.done2:
3597 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
3598 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[RETVAL]], align 4
3599 // CHECK7-NEXT:    ret i32 [[TMP37]]
3600 //
3601 //
3602 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
3603 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
3604 // CHECK7-NEXT:  entry:
3605 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
3606 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
3607 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
3608 // CHECK7-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
3609 // CHECK7-NEXT:    ret void
3610 //
3611 //
3612 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
3613 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3614 // CHECK7-NEXT:  entry:
3615 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
3616 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
3617 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
3618 // CHECK7-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
3619 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
3620 // CHECK7-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
3621 // CHECK7-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
3622 // CHECK7-NEXT:    ret void
3623 //
3624 //
3625 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106
3626 // CHECK7-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3:[0-9]+]] {
3627 // CHECK7-NEXT:  entry:
3628 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
3629 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
3630 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
3631 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
3632 // CHECK7-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
3633 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
3634 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
3635 // CHECK7-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
3636 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
3637 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
3638 // CHECK7-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3639 // CHECK7-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
3640 // CHECK7-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
3641 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
3642 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3643 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
3644 // CHECK7-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
3645 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4
3646 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4
3647 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
3648 // CHECK7-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
3649 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SVAR_ADDR]], align 4
3650 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[SVAR_CASTED]], align 4
3651 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
3652 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP5]], i32 [[TMP7]])
3653 // CHECK7-NEXT:    ret void
3654 //
3655 //
3656 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
3657 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3]] {
3658 // CHECK7-NEXT:  entry:
3659 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3660 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3661 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
3662 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
3663 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
3664 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
3665 // CHECK7-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
3666 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
3667 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3668 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3669 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3670 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3671 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3672 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3673 // CHECK7-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
3674 // CHECK7-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
3675 // CHECK7-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
3676 // CHECK7-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S:%.*]], align 4
3677 // CHECK7-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
3678 // CHECK7-NEXT:    [[SVAR7:%.*]] = alloca i32, align 4
3679 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3680 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
3681 // CHECK7-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
3682 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3683 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3684 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
3685 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
3686 // CHECK7-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3687 // CHECK7-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
3688 // CHECK7-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
3689 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
3690 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3691 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
3692 // CHECK7-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
3693 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3694 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
3695 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3696 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3697 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
3698 // CHECK7-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
3699 // CHECK7-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
3700 // CHECK7:       arrayctor.loop:
3701 // CHECK7-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
3702 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
3703 // CHECK7-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
3704 // CHECK7-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
3705 // CHECK7-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
3706 // CHECK7:       arrayctor.cont:
3707 // CHECK7-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
3708 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
3709 // CHECK7-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
3710 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3711 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
3712 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3713 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3714 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
3715 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3716 // CHECK7:       cond.true:
3717 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3718 // CHECK7:       cond.false:
3719 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3720 // CHECK7-NEXT:    br label [[COND_END]]
3721 // CHECK7:       cond.end:
3722 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
3723 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3724 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3725 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
3726 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3727 // CHECK7:       omp.inner.for.cond:
3728 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3729 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3730 // CHECK7-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3731 // CHECK7-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
3732 // CHECK7:       omp.inner.for.cond.cleanup:
3733 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
3734 // CHECK7:       omp.inner.for.body:
3735 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3736 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3737 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[T_VAR2]], align 4
3738 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[T_VAR_CASTED]], align 4
3739 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
3740 // CHECK7-NEXT:    [[TMP15:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
3741 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[SVAR7]], align 4
3742 // CHECK7-NEXT:    store i32 [[TMP16]], i32* [[SVAR_CASTED]], align 4
3743 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
3744 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP11]], i32 [[TMP12]], [2 x i32]* [[VEC3]], i32 [[TMP14]], [2 x %struct.S]* [[S_ARR4]], %struct.S* [[TMP15]], i32 [[TMP17]])
3745 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3746 // CHECK7:       omp.inner.for.inc:
3747 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3748 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
3749 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
3750 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
3751 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
3752 // CHECK7:       omp.inner.for.end:
3753 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3754 // CHECK7:       omp.loop.exit:
3755 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3756 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
3757 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
3758 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3759 // CHECK7-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
3760 // CHECK7-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3761 // CHECK7:       .omp.final.then:
3762 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
3763 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3764 // CHECK7:       .omp.final.done:
3765 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3766 // CHECK7-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
3767 // CHECK7-NEXT:    br i1 [[TMP25]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3768 // CHECK7:       .omp.lastprivate.then:
3769 // CHECK7-NEXT:    [[TMP26:%.*]] = load i32, i32* [[T_VAR2]], align 4
3770 // CHECK7-NEXT:    store i32 [[TMP26]], i32* [[T_VAR_ADDR]], align 4
3771 // CHECK7-NEXT:    [[TMP27:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
3772 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
3773 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP27]], i8* align 4 [[TMP28]], i32 8, i1 false)
3774 // CHECK7-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
3775 // CHECK7-NEXT:    [[TMP29:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
3776 // CHECK7-NEXT:    [[TMP30:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN9]], i32 2
3777 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN9]], [[TMP30]]
3778 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3779 // CHECK7:       omp.arraycpy.body:
3780 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP29]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3781 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN9]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3782 // CHECK7-NEXT:    [[TMP31:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
3783 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
3784 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i32 4, i1 false)
3785 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3786 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3787 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP30]]
3788 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
3789 // CHECK7:       omp.arraycpy.done10:
3790 // CHECK7-NEXT:    [[TMP33:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
3791 // CHECK7-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
3792 // CHECK7-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP33]] to i8*
3793 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i32 4, i1 false)
3794 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[SVAR7]], align 4
3795 // CHECK7-NEXT:    store i32 [[TMP36]], i32* [[SVAR_ADDR]], align 4
3796 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3797 // CHECK7:       .omp.lastprivate.done:
3798 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
3799 // CHECK7-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
3800 // CHECK7-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
3801 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3802 // CHECK7:       arraydestroy.body:
3803 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3804 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
3805 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3806 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
3807 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
3808 // CHECK7:       arraydestroy.done12:
3809 // CHECK7-NEXT:    ret void
3810 //
3811 //
3812 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
3813 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3]] {
3814 // CHECK7-NEXT:  entry:
3815 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3816 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3817 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3818 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3819 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
3820 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
3821 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
3822 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
3823 // CHECK7-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
3824 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
3825 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3826 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
3827 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3828 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3829 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3830 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3831 // CHECK7-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
3832 // CHECK7-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
3833 // CHECK7-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
3834 // CHECK7-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S:%.*]], align 4
3835 // CHECK7-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
3836 // CHECK7-NEXT:    [[SVAR7:%.*]] = alloca i32, align 4
3837 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
3838 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3839 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3840 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3841 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3842 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
3843 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
3844 // CHECK7-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3845 // CHECK7-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
3846 // CHECK7-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
3847 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
3848 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
3849 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
3850 // CHECK7-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
3851 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3852 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
3853 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3854 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3855 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_LB]], align 4
3856 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
3857 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3858 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3859 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
3860 // CHECK7-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
3861 // CHECK7-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
3862 // CHECK7:       arrayctor.loop:
3863 // CHECK7-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
3864 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
3865 // CHECK7-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
3866 // CHECK7-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
3867 // CHECK7-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
3868 // CHECK7:       arrayctor.cont:
3869 // CHECK7-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
3870 // CHECK7-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
3871 // CHECK7-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
3872 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3873 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
3874 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3875 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3876 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
3877 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3878 // CHECK7:       cond.true:
3879 // CHECK7-NEXT:    br label [[COND_END:%.*]]
3880 // CHECK7:       cond.false:
3881 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3882 // CHECK7-NEXT:    br label [[COND_END]]
3883 // CHECK7:       cond.end:
3884 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
3885 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3886 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3887 // CHECK7-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
3888 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3889 // CHECK7:       omp.inner.for.cond:
3890 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3891 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3892 // CHECK7-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
3893 // CHECK7-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
3894 // CHECK7:       omp.inner.for.cond.cleanup:
3895 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
3896 // CHECK7:       omp.inner.for.body:
3897 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3898 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
3899 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3900 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
3901 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR2]], align 4
3902 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
3903 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP15]]
3904 // CHECK7-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
3905 // CHECK7-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
3906 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
3907 // CHECK7-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 [[TMP17]]
3908 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
3909 // CHECK7-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[TMP16]] to i8*
3910 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false)
3911 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3912 // CHECK7:       omp.body.continue:
3913 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3914 // CHECK7:       omp.inner.for.inc:
3915 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
3916 // CHECK7-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1
3917 // CHECK7-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
3918 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
3919 // CHECK7:       omp.inner.for.end:
3920 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3921 // CHECK7:       omp.loop.exit:
3922 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3923 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3924 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3925 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3926 // CHECK7-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3927 // CHECK7-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3928 // CHECK7:       .omp.final.then:
3929 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
3930 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3931 // CHECK7:       .omp.final.done:
3932 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3933 // CHECK7-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3934 // CHECK7-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
3935 // CHECK7:       .omp.lastprivate.then:
3936 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR2]], align 4
3937 // CHECK7-NEXT:    store i32 [[TMP27]], i32* [[T_VAR_ADDR]], align 4
3938 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
3939 // CHECK7-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
3940 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i32 8, i1 false)
3941 // CHECK7-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
3942 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
3943 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
3944 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN11]], [[TMP31]]
3945 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE12:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
3946 // CHECK7:       omp.arraycpy.body:
3947 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3948 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN11]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
3949 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
3950 // CHECK7-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
3951 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
3952 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
3953 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
3954 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
3955 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_BODY]]
3956 // CHECK7:       omp.arraycpy.done12:
3957 // CHECK7-NEXT:    [[TMP34:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
3958 // CHECK7-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP5]] to i8*
3959 // CHECK7-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP34]] to i8*
3960 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i32 4, i1 false)
3961 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[SVAR7]], align 4
3962 // CHECK7-NEXT:    store i32 [[TMP37]], i32* [[SVAR_ADDR]], align 4
3963 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
3964 // CHECK7:       .omp.lastprivate.done:
3965 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
3966 // CHECK7-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
3967 // CHECK7-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN13]], i32 2
3968 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
3969 // CHECK7:       arraydestroy.body:
3970 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP38]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
3971 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
3972 // CHECK7-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
3973 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN13]]
3974 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE14:%.*]], label [[ARRAYDESTROY_BODY]]
3975 // CHECK7:       arraydestroy.done14:
3976 // CHECK7-NEXT:    ret void
3977 //
3978 //
3979 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
3980 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
3981 // CHECK7-NEXT:  entry:
3982 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
3983 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
3984 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
3985 // CHECK7-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
3986 // CHECK7-NEXT:    ret void
3987 //
3988 //
3989 // CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
3990 // CHECK7-SAME: () #[[ATTR5:[0-9]+]] comdat {
3991 // CHECK7-NEXT:  entry:
3992 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
3993 // CHECK7-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
3994 // CHECK7-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
3995 // CHECK7-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
3996 // CHECK7-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
3997 // CHECK7-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
3998 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
3999 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4000 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
4001 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
4002 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
4003 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4004 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
4005 // CHECK7-NEXT:    store i32 0, i32* [[T_VAR]], align 4
4006 // CHECK7-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
4007 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
4008 // CHECK7-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
4009 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
4010 // CHECK7-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
4011 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
4012 // CHECK7-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
4013 // CHECK7-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
4014 // CHECK7-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 4
4015 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
4016 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[T_VAR_CASTED]], align 4
4017 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4018 // CHECK7-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
4019 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4020 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [2 x i32]**
4021 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP6]], align 4
4022 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4023 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
4024 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 4
4025 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
4026 // CHECK7-NEXT:    store i8* null, i8** [[TMP9]], align 4
4027 // CHECK7-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4028 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
4029 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
4030 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4031 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
4032 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
4033 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
4034 // CHECK7-NEXT:    store i8* null, i8** [[TMP14]], align 4
4035 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4036 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [2 x %struct.S.0]**
4037 // CHECK7-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP16]], align 4
4038 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4039 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S.0]**
4040 // CHECK7-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP18]], align 4
4041 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
4042 // CHECK7-NEXT:    store i8* null, i8** [[TMP19]], align 4
4043 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4044 // CHECK7-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S.0**
4045 // CHECK7-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP21]], align 4
4046 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4047 // CHECK7-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S.0**
4048 // CHECK7-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP23]], align 4
4049 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
4050 // CHECK7-NEXT:    store i8* null, i8** [[TMP24]], align 4
4051 // CHECK7-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4052 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4053 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 2)
4054 // CHECK7-NEXT:    [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4055 // CHECK7-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
4056 // CHECK7-NEXT:    br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4057 // CHECK7:       omp_offload.failed:
4058 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50([2 x i32]* [[VEC]], i32 [[TMP3]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP4]]) #[[ATTR4]]
4059 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4060 // CHECK7:       omp_offload.cont:
4061 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4062 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
4063 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
4064 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4065 // CHECK7:       arraydestroy.body:
4066 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4067 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4068 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4069 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
4070 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
4071 // CHECK7:       arraydestroy.done2:
4072 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
4073 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
4074 // CHECK7-NEXT:    ret i32 [[TMP30]]
4075 //
4076 //
4077 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
4078 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4079 // CHECK7-NEXT:  entry:
4080 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
4081 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
4082 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
4083 // CHECK7-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
4084 // CHECK7-NEXT:    store float 0.000000e+00, float* [[F]], align 4
4085 // CHECK7-NEXT:    ret void
4086 //
4087 //
4088 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
4089 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4090 // CHECK7-NEXT:  entry:
4091 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
4092 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
4093 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
4094 // CHECK7-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
4095 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
4096 // CHECK7-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
4097 // CHECK7-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
4098 // CHECK7-NEXT:    store float [[TMP0]], float* [[F]], align 4
4099 // CHECK7-NEXT:    ret void
4100 //
4101 //
4102 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
4103 // CHECK7-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4104 // CHECK7-NEXT:  entry:
4105 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
4106 // CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
4107 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
4108 // CHECK7-NEXT:    ret void
4109 //
4110 //
4111 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
4112 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4113 // CHECK7-NEXT:  entry:
4114 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4115 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4116 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4117 // CHECK7-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
4118 // CHECK7-NEXT:    ret void
4119 //
4120 //
4121 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
4122 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4123 // CHECK7-NEXT:  entry:
4124 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4125 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4126 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4127 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4128 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4129 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
4130 // CHECK7-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
4131 // CHECK7-NEXT:    ret void
4132 //
4133 //
4134 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
4135 // CHECK7-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
4136 // CHECK7-NEXT:  entry:
4137 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4138 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4139 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
4140 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
4141 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
4142 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4143 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4144 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4145 // CHECK7-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4146 // CHECK7-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
4147 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4148 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4149 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
4150 // CHECK7-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
4151 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4
4152 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4
4153 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4154 // CHECK7-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
4155 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
4156 // CHECK7-NEXT:    ret void
4157 //
4158 //
4159 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
4160 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
4161 // CHECK7-NEXT:  entry:
4162 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4163 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4164 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4165 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4166 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
4167 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
4168 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
4169 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4170 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4171 // CHECK7-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4172 // CHECK7-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4173 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4174 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4175 // CHECK7-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
4176 // CHECK7-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
4177 // CHECK7-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
4178 // CHECK7-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
4179 // CHECK7-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
4180 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4181 // CHECK7-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4182 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4183 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4184 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4185 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4186 // CHECK7-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4187 // CHECK7-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
4188 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4189 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4190 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
4191 // CHECK7-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
4192 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4193 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
4194 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4195 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4196 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
4197 // CHECK7-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
4198 // CHECK7-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
4199 // CHECK7:       arrayctor.loop:
4200 // CHECK7-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
4201 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
4202 // CHECK7-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
4203 // CHECK7-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
4204 // CHECK7-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
4205 // CHECK7:       arrayctor.cont:
4206 // CHECK7-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
4207 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
4208 // CHECK7-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
4209 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4210 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
4211 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4212 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4213 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
4214 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4215 // CHECK7:       cond.true:
4216 // CHECK7-NEXT:    br label [[COND_END:%.*]]
4217 // CHECK7:       cond.false:
4218 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4219 // CHECK7-NEXT:    br label [[COND_END]]
4220 // CHECK7:       cond.end:
4221 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
4222 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4223 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4224 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
4225 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4226 // CHECK7:       omp.inner.for.cond:
4227 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4228 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4229 // CHECK7-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4230 // CHECK7-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
4231 // CHECK7:       omp.inner.for.cond.cleanup:
4232 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
4233 // CHECK7:       omp.inner.for.body:
4234 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4235 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4236 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[T_VAR2]], align 4
4237 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[T_VAR_CASTED]], align 4
4238 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4239 // CHECK7-NEXT:    [[TMP15:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
4240 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP11]], i32 [[TMP12]], [2 x i32]* [[VEC3]], i32 [[TMP14]], [2 x %struct.S.0]* [[S_ARR4]], %struct.S.0* [[TMP15]])
4241 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4242 // CHECK7:       omp.inner.for.inc:
4243 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4244 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4245 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
4246 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
4247 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
4248 // CHECK7:       omp.inner.for.end:
4249 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4250 // CHECK7:       omp.loop.exit:
4251 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4252 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
4253 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]])
4254 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4255 // CHECK7-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
4256 // CHECK7-NEXT:    br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4257 // CHECK7:       .omp.final.then:
4258 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
4259 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4260 // CHECK7:       .omp.final.done:
4261 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4262 // CHECK7-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
4263 // CHECK7-NEXT:    br i1 [[TMP23]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4264 // CHECK7:       .omp.lastprivate.then:
4265 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[T_VAR2]], align 4
4266 // CHECK7-NEXT:    store i32 [[TMP24]], i32* [[T_VAR_ADDR]], align 4
4267 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
4268 // CHECK7-NEXT:    [[TMP26:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
4269 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 8, i1 false)
4270 // CHECK7-NEXT:    [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
4271 // CHECK7-NEXT:    [[TMP27:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
4272 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN8]], i32 2
4273 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN8]], [[TMP28]]
4274 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE9:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4275 // CHECK7:       omp.arraycpy.body:
4276 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4277 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN8]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4278 // CHECK7-NEXT:    [[TMP29:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
4279 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
4280 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP29]], i8* align 4 [[TMP30]], i32 4, i1 false)
4281 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4282 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4283 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP28]]
4284 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE9]], label [[OMP_ARRAYCPY_BODY]]
4285 // CHECK7:       omp.arraycpy.done9:
4286 // CHECK7-NEXT:    [[TMP31:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
4287 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
4288 // CHECK7-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[TMP31]] to i8*
4289 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
4290 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4291 // CHECK7:       .omp.lastprivate.done:
4292 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
4293 // CHECK7-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
4294 // CHECK7-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i32 2
4295 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4296 // CHECK7:       arraydestroy.body:
4297 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP34]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4298 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4299 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4300 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]]
4301 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE11:%.*]], label [[ARRAYDESTROY_BODY]]
4302 // CHECK7:       arraydestroy.done11:
4303 // CHECK7-NEXT:    ret void
4304 //
4305 //
4306 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
4307 // CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
4308 // CHECK7-NEXT:  entry:
4309 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4310 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4311 // CHECK7-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4312 // CHECK7-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4313 // CHECK7-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4314 // CHECK7-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4315 // CHECK7-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
4316 // CHECK7-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
4317 // CHECK7-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
4318 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4319 // CHECK7-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4320 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4321 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4322 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4323 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4324 // CHECK7-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
4325 // CHECK7-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
4326 // CHECK7-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
4327 // CHECK7-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
4328 // CHECK7-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
4329 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
4330 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4331 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4332 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4333 // CHECK7-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4334 // CHECK7-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4335 // CHECK7-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4336 // CHECK7-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4337 // CHECK7-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
4338 // CHECK7-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4339 // CHECK7-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
4340 // CHECK7-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
4341 // CHECK7-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
4342 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4343 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
4344 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4345 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4346 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_LB]], align 4
4347 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
4348 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4349 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4350 // CHECK7-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
4351 // CHECK7-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
4352 // CHECK7-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
4353 // CHECK7:       arrayctor.loop:
4354 // CHECK7-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
4355 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
4356 // CHECK7-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
4357 // CHECK7-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
4358 // CHECK7-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
4359 // CHECK7:       arrayctor.cont:
4360 // CHECK7-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
4361 // CHECK7-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
4362 // CHECK7-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
4363 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4364 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
4365 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4366 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4367 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
4368 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4369 // CHECK7:       cond.true:
4370 // CHECK7-NEXT:    br label [[COND_END:%.*]]
4371 // CHECK7:       cond.false:
4372 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4373 // CHECK7-NEXT:    br label [[COND_END]]
4374 // CHECK7:       cond.end:
4375 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
4376 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4377 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4378 // CHECK7-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
4379 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4380 // CHECK7:       omp.inner.for.cond:
4381 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4382 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4383 // CHECK7-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
4384 // CHECK7-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
4385 // CHECK7:       omp.inner.for.cond.cleanup:
4386 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
4387 // CHECK7:       omp.inner.for.body:
4388 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4389 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
4390 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4391 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
4392 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR2]], align 4
4393 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
4394 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP15]]
4395 // CHECK7-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
4396 // CHECK7-NEXT:    [[TMP16:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
4397 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
4398 // CHECK7-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 [[TMP17]]
4399 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
4400 // CHECK7-NEXT:    [[TMP19:%.*]] = bitcast %struct.S.0* [[TMP16]] to i8*
4401 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false)
4402 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4403 // CHECK7:       omp.body.continue:
4404 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4405 // CHECK7:       omp.inner.for.inc:
4406 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4407 // CHECK7-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], 1
4408 // CHECK7-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
4409 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
4410 // CHECK7:       omp.inner.for.end:
4411 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4412 // CHECK7:       omp.loop.exit:
4413 // CHECK7-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4414 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4415 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4416 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4417 // CHECK7-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4418 // CHECK7-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4419 // CHECK7:       .omp.final.then:
4420 // CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
4421 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4422 // CHECK7:       .omp.final.done:
4423 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4424 // CHECK7-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4425 // CHECK7-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4426 // CHECK7:       .omp.lastprivate.then:
4427 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR2]], align 4
4428 // CHECK7-NEXT:    store i32 [[TMP27]], i32* [[T_VAR_ADDR]], align 4
4429 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
4430 // CHECK7-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
4431 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i32 8, i1 false)
4432 // CHECK7-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
4433 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
4434 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i32 2
4435 // CHECK7-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN10]], [[TMP31]]
4436 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4437 // CHECK7:       omp.arraycpy.body:
4438 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4439 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN10]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4440 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
4441 // CHECK7-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
4442 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
4443 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4444 // CHECK7-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4445 // CHECK7-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
4446 // CHECK7-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
4447 // CHECK7:       omp.arraycpy.done11:
4448 // CHECK7-NEXT:    [[TMP34:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
4449 // CHECK7-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP5]] to i8*
4450 // CHECK7-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP34]] to i8*
4451 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i32 4, i1 false)
4452 // CHECK7-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4453 // CHECK7:       .omp.lastprivate.done:
4454 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
4455 // CHECK7-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
4456 // CHECK7-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN12]], i32 2
4457 // CHECK7-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4458 // CHECK7:       arraydestroy.body:
4459 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4460 // CHECK7-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4461 // CHECK7-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4462 // CHECK7-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
4463 // CHECK7-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
4464 // CHECK7:       arraydestroy.done13:
4465 // CHECK7-NEXT:    ret void
4466 //
4467 //
4468 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
4469 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4470 // CHECK7-NEXT:  entry:
4471 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4472 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4473 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4474 // CHECK7-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
4475 // CHECK7-NEXT:    ret void
4476 //
4477 //
4478 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
4479 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4480 // CHECK7-NEXT:  entry:
4481 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4482 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4483 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4484 // CHECK7-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
4485 // CHECK7-NEXT:    store i32 0, i32* [[F]], align 4
4486 // CHECK7-NEXT:    ret void
4487 //
4488 //
4489 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
4490 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4491 // CHECK7-NEXT:  entry:
4492 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4493 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4494 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4495 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4496 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4497 // CHECK7-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
4498 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
4499 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
4500 // CHECK7-NEXT:    ret void
4501 //
4502 //
4503 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
4504 // CHECK7-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4505 // CHECK7-NEXT:  entry:
4506 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
4507 // CHECK7-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
4508 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
4509 // CHECK7-NEXT:    ret void
4510 //
4511 //
4512 // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4513 // CHECK7-SAME: () #[[ATTR6:[0-9]+]] {
4514 // CHECK7-NEXT:  entry:
4515 // CHECK7-NEXT:    call void @__tgt_register_requires(i64 1)
4516 // CHECK7-NEXT:    ret void
4517 //
4518 //
4519 // CHECK8-LABEL: define {{[^@]+}}@main
4520 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
4521 // CHECK8-NEXT:  entry:
4522 // CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4523 // CHECK8-NEXT:    [[G:%.*]] = alloca double, align 8
4524 // CHECK8-NEXT:    [[G1:%.*]] = alloca double*, align 4
4525 // CHECK8-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
4526 // CHECK8-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
4527 // CHECK8-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
4528 // CHECK8-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
4529 // CHECK8-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 4
4530 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
4531 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4532 // CHECK8-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
4533 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
4534 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
4535 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
4536 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4537 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4538 // CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4539 // CHECK8-NEXT:    store double* [[G]], double** [[G1]], align 4
4540 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
4541 // CHECK8-NEXT:    store i32 0, i32* [[T_VAR]], align 4
4542 // CHECK8-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
4543 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i32 8, i1 false)
4544 // CHECK8-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
4545 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
4546 // CHECK8-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i32 1
4547 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
4548 // CHECK8-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 4
4549 // CHECK8-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
4550 // CHECK8-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 4
4551 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
4552 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[T_VAR_CASTED]], align 4
4553 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4554 // CHECK8-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
4555 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* @_ZZ4mainE4svar, align 4
4556 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[SVAR_CASTED]], align 4
4557 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
4558 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4559 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
4560 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 4
4561 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4562 // CHECK8-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [2 x i32]**
4563 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP10]], align 4
4564 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
4565 // CHECK8-NEXT:    store i8* null, i8** [[TMP11]], align 4
4566 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4567 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
4568 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
4569 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4570 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
4571 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
4572 // CHECK8-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
4573 // CHECK8-NEXT:    store i8* null, i8** [[TMP16]], align 4
4574 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4575 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S]**
4576 // CHECK8-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP18]], align 4
4577 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4578 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [2 x %struct.S]**
4579 // CHECK8-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP20]], align 4
4580 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
4581 // CHECK8-NEXT:    store i8* null, i8** [[TMP21]], align 4
4582 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4583 // CHECK8-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S**
4584 // CHECK8-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP23]], align 4
4585 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4586 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.S**
4587 // CHECK8-NEXT:    store %struct.S* [[TMP4]], %struct.S** [[TMP25]], align 4
4588 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
4589 // CHECK8-NEXT:    store i8* null, i8** [[TMP26]], align 4
4590 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
4591 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
4592 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP28]], align 4
4593 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
4594 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
4595 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP30]], align 4
4596 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
4597 // CHECK8-NEXT:    store i8* null, i8** [[TMP31]], align 4
4598 // CHECK8-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4599 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4600 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 2)
4601 // CHECK8-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 5, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
4602 // CHECK8-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4603 // CHECK8-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4604 // CHECK8:       omp_offload.failed:
4605 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106([2 x i32]* [[VEC]], i32 [[TMP3]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[TMP4]], i32 [[TMP6]]) #[[ATTR4:[0-9]+]]
4606 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4607 // CHECK8:       omp_offload.cont:
4608 // CHECK8-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
4609 // CHECK8-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
4610 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
4611 // CHECK8-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
4612 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4613 // CHECK8:       arraydestroy.body:
4614 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP36]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4615 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4616 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4617 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
4618 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
4619 // CHECK8:       arraydestroy.done2:
4620 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
4621 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[RETVAL]], align 4
4622 // CHECK8-NEXT:    ret i32 [[TMP37]]
4623 //
4624 //
4625 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
4626 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
4627 // CHECK8-NEXT:  entry:
4628 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
4629 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
4630 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
4631 // CHECK8-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
4632 // CHECK8-NEXT:    ret void
4633 //
4634 //
4635 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
4636 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
4637 // CHECK8-NEXT:  entry:
4638 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
4639 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
4640 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
4641 // CHECK8-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
4642 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
4643 // CHECK8-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
4644 // CHECK8-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
4645 // CHECK8-NEXT:    ret void
4646 //
4647 //
4648 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106
4649 // CHECK8-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3:[0-9]+]] {
4650 // CHECK8-NEXT:  entry:
4651 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4652 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4653 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
4654 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
4655 // CHECK8-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
4656 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
4657 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4658 // CHECK8-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
4659 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4660 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4661 // CHECK8-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4662 // CHECK8-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
4663 // CHECK8-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
4664 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4665 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4666 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
4667 // CHECK8-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
4668 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4
4669 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4
4670 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4671 // CHECK8-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
4672 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[SVAR_ADDR]], align 4
4673 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[SVAR_CASTED]], align 4
4674 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
4675 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP5]], i32 [[TMP7]])
4676 // CHECK8-NEXT:    ret void
4677 //
4678 //
4679 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
4680 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3]] {
4681 // CHECK8-NEXT:  entry:
4682 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4683 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4684 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4685 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4686 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
4687 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
4688 // CHECK8-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
4689 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
4690 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4691 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4692 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4693 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4694 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4695 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4696 // CHECK8-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
4697 // CHECK8-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
4698 // CHECK8-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
4699 // CHECK8-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S:%.*]], align 4
4700 // CHECK8-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
4701 // CHECK8-NEXT:    [[SVAR7:%.*]] = alloca i32, align 4
4702 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4703 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
4704 // CHECK8-NEXT:    [[SVAR_CASTED:%.*]] = alloca i32, align 4
4705 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4706 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4707 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4708 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4709 // CHECK8-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4710 // CHECK8-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
4711 // CHECK8-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
4712 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4713 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4714 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
4715 // CHECK8-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
4716 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4717 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
4718 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4719 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4720 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
4721 // CHECK8-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
4722 // CHECK8-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
4723 // CHECK8:       arrayctor.loop:
4724 // CHECK8-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
4725 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
4726 // CHECK8-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
4727 // CHECK8-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
4728 // CHECK8-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
4729 // CHECK8:       arrayctor.cont:
4730 // CHECK8-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
4731 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
4732 // CHECK8-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
4733 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4734 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
4735 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4736 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4737 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
4738 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4739 // CHECK8:       cond.true:
4740 // CHECK8-NEXT:    br label [[COND_END:%.*]]
4741 // CHECK8:       cond.false:
4742 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4743 // CHECK8-NEXT:    br label [[COND_END]]
4744 // CHECK8:       cond.end:
4745 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
4746 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4747 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4748 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
4749 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4750 // CHECK8:       omp.inner.for.cond:
4751 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4752 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4753 // CHECK8-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4754 // CHECK8-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
4755 // CHECK8:       omp.inner.for.cond.cleanup:
4756 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
4757 // CHECK8:       omp.inner.for.body:
4758 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4759 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4760 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[T_VAR2]], align 4
4761 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[T_VAR_CASTED]], align 4
4762 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
4763 // CHECK8-NEXT:    [[TMP15:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
4764 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[SVAR7]], align 4
4765 // CHECK8-NEXT:    store i32 [[TMP16]], i32* [[SVAR_CASTED]], align 4
4766 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[SVAR_CASTED]], align 4
4767 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP11]], i32 [[TMP12]], [2 x i32]* [[VEC3]], i32 [[TMP14]], [2 x %struct.S]* [[S_ARR4]], %struct.S* [[TMP15]], i32 [[TMP17]])
4768 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4769 // CHECK8:       omp.inner.for.inc:
4770 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4771 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4772 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
4773 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
4774 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
4775 // CHECK8:       omp.inner.for.end:
4776 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4777 // CHECK8:       omp.loop.exit:
4778 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4779 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
4780 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
4781 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4782 // CHECK8-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
4783 // CHECK8-NEXT:    br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4784 // CHECK8:       .omp.final.then:
4785 // CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
4786 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4787 // CHECK8:       .omp.final.done:
4788 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4789 // CHECK8-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
4790 // CHECK8-NEXT:    br i1 [[TMP25]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4791 // CHECK8:       .omp.lastprivate.then:
4792 // CHECK8-NEXT:    [[TMP26:%.*]] = load i32, i32* [[T_VAR2]], align 4
4793 // CHECK8-NEXT:    store i32 [[TMP26]], i32* [[T_VAR_ADDR]], align 4
4794 // CHECK8-NEXT:    [[TMP27:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
4795 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
4796 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP27]], i8* align 4 [[TMP28]], i32 8, i1 false)
4797 // CHECK8-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
4798 // CHECK8-NEXT:    [[TMP29:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
4799 // CHECK8-NEXT:    [[TMP30:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN9]], i32 2
4800 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN9]], [[TMP30]]
4801 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4802 // CHECK8:       omp.arraycpy.body:
4803 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP29]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4804 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN9]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4805 // CHECK8-NEXT:    [[TMP31:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
4806 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
4807 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i32 4, i1 false)
4808 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4809 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4810 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP30]]
4811 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
4812 // CHECK8:       omp.arraycpy.done10:
4813 // CHECK8-NEXT:    [[TMP33:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
4814 // CHECK8-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
4815 // CHECK8-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP33]] to i8*
4816 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i32 4, i1 false)
4817 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[SVAR7]], align 4
4818 // CHECK8-NEXT:    store i32 [[TMP36]], i32* [[SVAR_ADDR]], align 4
4819 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4820 // CHECK8:       .omp.lastprivate.done:
4821 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
4822 // CHECK8-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
4823 // CHECK8-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
4824 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4825 // CHECK8:       arraydestroy.body:
4826 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4827 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4828 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4829 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
4830 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
4831 // CHECK8:       arraydestroy.done12:
4832 // CHECK8-NEXT:    ret void
4833 //
4834 //
4835 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..1
4836 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SVAR:%.*]]) #[[ATTR3]] {
4837 // CHECK8-NEXT:  entry:
4838 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4839 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4840 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4841 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4842 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
4843 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
4844 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
4845 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
4846 // CHECK8-NEXT:    [[SVAR_ADDR:%.*]] = alloca i32, align 4
4847 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
4848 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4849 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
4850 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4851 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4852 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4853 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4854 // CHECK8-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
4855 // CHECK8-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
4856 // CHECK8-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
4857 // CHECK8-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S:%.*]], align 4
4858 // CHECK8-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
4859 // CHECK8-NEXT:    [[SVAR7:%.*]] = alloca i32, align 4
4860 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
4861 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4862 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4863 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4864 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4865 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
4866 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
4867 // CHECK8-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4868 // CHECK8-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 4
4869 // CHECK8-NEXT:    store i32 [[SVAR]], i32* [[SVAR_ADDR]], align 4
4870 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
4871 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 4
4872 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 4
4873 // CHECK8-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP]], align 4
4874 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4875 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
4876 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4877 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4878 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_LB]], align 4
4879 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
4880 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4881 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4882 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
4883 // CHECK8-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
4884 // CHECK8-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
4885 // CHECK8:       arrayctor.loop:
4886 // CHECK8-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
4887 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
4888 // CHECK8-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
4889 // CHECK8-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
4890 // CHECK8-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
4891 // CHECK8:       arrayctor.cont:
4892 // CHECK8-NEXT:    [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
4893 // CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
4894 // CHECK8-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
4895 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4896 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
4897 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4898 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4899 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
4900 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4901 // CHECK8:       cond.true:
4902 // CHECK8-NEXT:    br label [[COND_END:%.*]]
4903 // CHECK8:       cond.false:
4904 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4905 // CHECK8-NEXT:    br label [[COND_END]]
4906 // CHECK8:       cond.end:
4907 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
4908 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4909 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4910 // CHECK8-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
4911 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4912 // CHECK8:       omp.inner.for.cond:
4913 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4914 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4915 // CHECK8-NEXT:    [[CMP8:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
4916 // CHECK8-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
4917 // CHECK8:       omp.inner.for.cond.cleanup:
4918 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
4919 // CHECK8:       omp.inner.for.body:
4920 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4921 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
4922 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4923 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
4924 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR2]], align 4
4925 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
4926 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP15]]
4927 // CHECK8-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
4928 // CHECK8-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
4929 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
4930 // CHECK8-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 [[TMP17]]
4931 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
4932 // CHECK8-NEXT:    [[TMP19:%.*]] = bitcast %struct.S* [[TMP16]] to i8*
4933 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false)
4934 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4935 // CHECK8:       omp.body.continue:
4936 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4937 // CHECK8:       omp.inner.for.inc:
4938 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4939 // CHECK8-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1
4940 // CHECK8-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
4941 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
4942 // CHECK8:       omp.inner.for.end:
4943 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4944 // CHECK8:       omp.loop.exit:
4945 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4946 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4947 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4948 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4949 // CHECK8-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4950 // CHECK8-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4951 // CHECK8:       .omp.final.then:
4952 // CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
4953 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4954 // CHECK8:       .omp.final.done:
4955 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4956 // CHECK8-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4957 // CHECK8-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
4958 // CHECK8:       .omp.lastprivate.then:
4959 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR2]], align 4
4960 // CHECK8-NEXT:    store i32 [[TMP27]], i32* [[T_VAR_ADDR]], align 4
4961 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
4962 // CHECK8-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
4963 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i32 8, i1 false)
4964 // CHECK8-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP1]], i32 0, i32 0
4965 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
4966 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
4967 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN11]], [[TMP31]]
4968 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE12:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
4969 // CHECK8:       omp.arraycpy.body:
4970 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4971 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN11]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
4972 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
4973 // CHECK8-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
4974 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
4975 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
4976 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
4977 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
4978 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_BODY]]
4979 // CHECK8:       omp.arraycpy.done12:
4980 // CHECK8-NEXT:    [[TMP34:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
4981 // CHECK8-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP5]] to i8*
4982 // CHECK8-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[TMP34]] to i8*
4983 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i32 4, i1 false)
4984 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[SVAR7]], align 4
4985 // CHECK8-NEXT:    store i32 [[TMP37]], i32* [[SVAR_ADDR]], align 4
4986 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
4987 // CHECK8:       .omp.lastprivate.done:
4988 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
4989 // CHECK8-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
4990 // CHECK8-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN13]], i32 2
4991 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
4992 // CHECK8:       arraydestroy.body:
4993 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP38]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
4994 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
4995 // CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
4996 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN13]]
4997 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE14:%.*]], label [[ARRAYDESTROY_BODY]]
4998 // CHECK8:       arraydestroy.done14:
4999 // CHECK8-NEXT:    ret void
5000 //
5001 //
5002 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
5003 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5004 // CHECK8-NEXT:  entry:
5005 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
5006 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
5007 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
5008 // CHECK8-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
5009 // CHECK8-NEXT:    ret void
5010 //
5011 //
5012 // CHECK8-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
5013 // CHECK8-SAME: () #[[ATTR5:[0-9]+]] comdat {
5014 // CHECK8-NEXT:  entry:
5015 // CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5016 // CHECK8-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
5017 // CHECK8-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
5018 // CHECK8-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
5019 // CHECK8-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
5020 // CHECK8-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
5021 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
5022 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
5023 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
5024 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
5025 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
5026 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5027 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
5028 // CHECK8-NEXT:    store i32 0, i32* [[T_VAR]], align 4
5029 // CHECK8-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
5030 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
5031 // CHECK8-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
5032 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
5033 // CHECK8-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
5034 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
5035 // CHECK8-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
5036 // CHECK8-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
5037 // CHECK8-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 4
5038 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
5039 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[T_VAR_CASTED]], align 4
5040 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
5041 // CHECK8-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
5042 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5043 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [2 x i32]**
5044 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP6]], align 4
5045 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5046 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [2 x i32]**
5047 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP8]], align 4
5048 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
5049 // CHECK8-NEXT:    store i8* null, i8** [[TMP9]], align 4
5050 // CHECK8-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
5051 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
5052 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
5053 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
5054 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
5055 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
5056 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
5057 // CHECK8-NEXT:    store i8* null, i8** [[TMP14]], align 4
5058 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
5059 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [2 x %struct.S.0]**
5060 // CHECK8-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP16]], align 4
5061 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
5062 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [2 x %struct.S.0]**
5063 // CHECK8-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP18]], align 4
5064 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
5065 // CHECK8-NEXT:    store i8* null, i8** [[TMP19]], align 4
5066 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
5067 // CHECK8-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S.0**
5068 // CHECK8-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP21]], align 4
5069 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
5070 // CHECK8-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.S.0**
5071 // CHECK8-NEXT:    store %struct.S.0* [[TMP4]], %struct.S.0** [[TMP23]], align 4
5072 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
5073 // CHECK8-NEXT:    store i8* null, i8** [[TMP24]], align 4
5074 // CHECK8-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5075 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5076 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 2)
5077 // CHECK8-NEXT:    [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
5078 // CHECK8-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
5079 // CHECK8-NEXT:    br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5080 // CHECK8:       omp_offload.failed:
5081 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50([2 x i32]* [[VEC]], i32 [[TMP3]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP4]]) #[[ATTR4]]
5082 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
5083 // CHECK8:       omp_offload.cont:
5084 // CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5085 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
5086 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
5087 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
5088 // CHECK8:       arraydestroy.body:
5089 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP29]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
5090 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
5091 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
5092 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
5093 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
5094 // CHECK8:       arraydestroy.done2:
5095 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
5096 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
5097 // CHECK8-NEXT:    ret i32 [[TMP30]]
5098 //
5099 //
5100 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
5101 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5102 // CHECK8-NEXT:  entry:
5103 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
5104 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
5105 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
5106 // CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
5107 // CHECK8-NEXT:    store float 0.000000e+00, float* [[F]], align 4
5108 // CHECK8-NEXT:    ret void
5109 //
5110 //
5111 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
5112 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5113 // CHECK8-NEXT:  entry:
5114 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
5115 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
5116 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
5117 // CHECK8-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
5118 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
5119 // CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
5120 // CHECK8-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
5121 // CHECK8-NEXT:    store float [[TMP0]], float* [[F]], align 4
5122 // CHECK8-NEXT:    ret void
5123 //
5124 //
5125 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
5126 // CHECK8-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5127 // CHECK8-NEXT:  entry:
5128 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
5129 // CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
5130 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
5131 // CHECK8-NEXT:    ret void
5132 //
5133 //
5134 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
5135 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5136 // CHECK8-NEXT:  entry:
5137 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5138 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5139 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5140 // CHECK8-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
5141 // CHECK8-NEXT:    ret void
5142 //
5143 //
5144 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
5145 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5146 // CHECK8-NEXT:  entry:
5147 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5148 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5149 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5150 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5151 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5152 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
5153 // CHECK8-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
5154 // CHECK8-NEXT:    ret void
5155 //
5156 //
5157 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
5158 // CHECK8-SAME: ([2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
5159 // CHECK8-NEXT:  entry:
5160 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
5161 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
5162 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
5163 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
5164 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
5165 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
5166 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
5167 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
5168 // CHECK8-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5169 // CHECK8-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
5170 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
5171 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5172 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
5173 // CHECK8-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
5174 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4
5175 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4
5176 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
5177 // CHECK8-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
5178 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
5179 // CHECK8-NEXT:    ret void
5180 //
5181 //
5182 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
5183 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
5184 // CHECK8-NEXT:  entry:
5185 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5186 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5187 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
5188 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
5189 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
5190 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
5191 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
5192 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5193 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5194 // CHECK8-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5195 // CHECK8-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5196 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5197 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5198 // CHECK8-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
5199 // CHECK8-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
5200 // CHECK8-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
5201 // CHECK8-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
5202 // CHECK8-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
5203 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5204 // CHECK8-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i32, align 4
5205 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5206 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5207 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
5208 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
5209 // CHECK8-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5210 // CHECK8-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
5211 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
5212 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5213 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
5214 // CHECK8-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
5215 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5216 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_COMB_UB]], align 4
5217 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5218 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5219 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5220 // CHECK8-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
5221 // CHECK8-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
5222 // CHECK8:       arrayctor.loop:
5223 // CHECK8-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
5224 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
5225 // CHECK8-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
5226 // CHECK8-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
5227 // CHECK8-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
5228 // CHECK8:       arrayctor.cont:
5229 // CHECK8-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
5230 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
5231 // CHECK8-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
5232 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5233 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
5234 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5235 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5236 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
5237 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5238 // CHECK8:       cond.true:
5239 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5240 // CHECK8:       cond.false:
5241 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5242 // CHECK8-NEXT:    br label [[COND_END]]
5243 // CHECK8:       cond.end:
5244 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
5245 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5246 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5247 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
5248 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5249 // CHECK8:       omp.inner.for.cond:
5250 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5251 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5252 // CHECK8-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
5253 // CHECK8-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
5254 // CHECK8:       omp.inner.for.cond.cleanup:
5255 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
5256 // CHECK8:       omp.inner.for.body:
5257 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5258 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5259 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[T_VAR2]], align 4
5260 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[T_VAR_CASTED]], align 4
5261 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
5262 // CHECK8-NEXT:    [[TMP15:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
5263 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP11]], i32 [[TMP12]], [2 x i32]* [[VEC3]], i32 [[TMP14]], [2 x %struct.S.0]* [[S_ARR4]], %struct.S.0* [[TMP15]])
5264 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5265 // CHECK8:       omp.inner.for.inc:
5266 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5267 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5268 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
5269 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
5270 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
5271 // CHECK8:       omp.inner.for.end:
5272 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5273 // CHECK8:       omp.loop.exit:
5274 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5275 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
5276 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]])
5277 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5278 // CHECK8-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
5279 // CHECK8-NEXT:    br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5280 // CHECK8:       .omp.final.then:
5281 // CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
5282 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5283 // CHECK8:       .omp.final.done:
5284 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5285 // CHECK8-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
5286 // CHECK8-NEXT:    br i1 [[TMP23]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
5287 // CHECK8:       .omp.lastprivate.then:
5288 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[T_VAR2]], align 4
5289 // CHECK8-NEXT:    store i32 [[TMP24]], i32* [[T_VAR_ADDR]], align 4
5290 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
5291 // CHECK8-NEXT:    [[TMP26:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
5292 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 8, i1 false)
5293 // CHECK8-NEXT:    [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
5294 // CHECK8-NEXT:    [[TMP27:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
5295 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN8]], i32 2
5296 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN8]], [[TMP28]]
5297 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE9:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5298 // CHECK8:       omp.arraycpy.body:
5299 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5300 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN8]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5301 // CHECK8-NEXT:    [[TMP29:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
5302 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
5303 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP29]], i8* align 4 [[TMP30]], i32 4, i1 false)
5304 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5305 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5306 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP28]]
5307 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE9]], label [[OMP_ARRAYCPY_BODY]]
5308 // CHECK8:       omp.arraycpy.done9:
5309 // CHECK8-NEXT:    [[TMP31:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
5310 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
5311 // CHECK8-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[TMP31]] to i8*
5312 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
5313 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
5314 // CHECK8:       .omp.lastprivate.done:
5315 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
5316 // CHECK8-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5317 // CHECK8-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i32 2
5318 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
5319 // CHECK8:       arraydestroy.body:
5320 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP34]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
5321 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
5322 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
5323 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]]
5324 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE11:%.*]], label [[ARRAYDESTROY_BODY]]
5325 // CHECK8:       arraydestroy.done11:
5326 // CHECK8-NEXT:    ret void
5327 //
5328 //
5329 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
5330 // CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
5331 // CHECK8-NEXT:  entry:
5332 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5333 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5334 // CHECK8-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
5335 // CHECK8-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
5336 // CHECK8-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
5337 // CHECK8-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32, align 4
5338 // CHECK8-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
5339 // CHECK8-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
5340 // CHECK8-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
5341 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5342 // CHECK8-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5343 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5344 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5345 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5346 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5347 // CHECK8-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
5348 // CHECK8-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
5349 // CHECK8-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
5350 // CHECK8-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
5351 // CHECK8-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
5352 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
5353 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5354 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5355 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5356 // CHECK8-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5357 // CHECK8-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 4
5358 // CHECK8-NEXT:    store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
5359 // CHECK8-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5360 // CHECK8-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 4
5361 // CHECK8-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 4
5362 // CHECK8-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 4
5363 // CHECK8-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 4
5364 // CHECK8-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP]], align 4
5365 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5366 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
5367 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
5368 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
5369 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[DOTOMP_LB]], align 4
5370 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
5371 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5372 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5373 // CHECK8-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5374 // CHECK8-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
5375 // CHECK8-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
5376 // CHECK8:       arrayctor.loop:
5377 // CHECK8-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
5378 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
5379 // CHECK8-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
5380 // CHECK8-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
5381 // CHECK8-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
5382 // CHECK8:       arrayctor.cont:
5383 // CHECK8-NEXT:    [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
5384 // CHECK8-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
5385 // CHECK8-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
5386 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5387 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
5388 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5389 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5390 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 1
5391 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5392 // CHECK8:       cond.true:
5393 // CHECK8-NEXT:    br label [[COND_END:%.*]]
5394 // CHECK8:       cond.false:
5395 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5396 // CHECK8-NEXT:    br label [[COND_END]]
5397 // CHECK8:       cond.end:
5398 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
5399 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5400 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5401 // CHECK8-NEXT:    store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
5402 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5403 // CHECK8:       omp.inner.for.cond:
5404 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5405 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5406 // CHECK8-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
5407 // CHECK8-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
5408 // CHECK8:       omp.inner.for.cond.cleanup:
5409 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
5410 // CHECK8:       omp.inner.for.body:
5411 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5412 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
5413 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5414 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
5415 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[T_VAR2]], align 4
5416 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I]], align 4
5417 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP15]]
5418 // CHECK8-NEXT:    store i32 [[TMP14]], i32* [[ARRAYIDX]], align 4
5419 // CHECK8-NEXT:    [[TMP16:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
5420 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[I]], align 4
5421 // CHECK8-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 [[TMP17]]
5422 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
5423 // CHECK8-NEXT:    [[TMP19:%.*]] = bitcast %struct.S.0* [[TMP16]] to i8*
5424 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false)
5425 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5426 // CHECK8:       omp.body.continue:
5427 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5428 // CHECK8:       omp.inner.for.inc:
5429 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5430 // CHECK8-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP20]], 1
5431 // CHECK8-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
5432 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
5433 // CHECK8:       omp.inner.for.end:
5434 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5435 // CHECK8:       omp.loop.exit:
5436 // CHECK8-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5437 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
5438 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
5439 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5440 // CHECK8-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5441 // CHECK8-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5442 // CHECK8:       .omp.final.then:
5443 // CHECK8-NEXT:    store i32 2, i32* [[I]], align 4
5444 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5445 // CHECK8:       .omp.final.done:
5446 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5447 // CHECK8-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5448 // CHECK8-NEXT:    br i1 [[TMP26]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
5449 // CHECK8:       .omp.lastprivate.then:
5450 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[T_VAR2]], align 4
5451 // CHECK8-NEXT:    store i32 [[TMP27]], i32* [[T_VAR_ADDR]], align 4
5452 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
5453 // CHECK8-NEXT:    [[TMP29:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
5454 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP28]], i8* align 4 [[TMP29]], i32 8, i1 false)
5455 // CHECK8-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP1]], i32 0, i32 0
5456 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
5457 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i32 2
5458 // CHECK8-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN10]], [[TMP31]]
5459 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5460 // CHECK8:       omp.arraycpy.body:
5461 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP30]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5462 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN10]], [[DOTOMP_LASTPRIVATE_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5463 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
5464 // CHECK8-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
5465 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i32 4, i1 false)
5466 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5467 // CHECK8-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5468 // CHECK8-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP31]]
5469 // CHECK8-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
5470 // CHECK8:       omp.arraycpy.done11:
5471 // CHECK8-NEXT:    [[TMP34:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
5472 // CHECK8-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP5]] to i8*
5473 // CHECK8-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP34]] to i8*
5474 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i32 4, i1 false)
5475 // CHECK8-NEXT:    br label [[DOTOMP_LASTPRIVATE_DONE]]
5476 // CHECK8:       .omp.lastprivate.done:
5477 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
5478 // CHECK8-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5479 // CHECK8-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN12]], i32 2
5480 // CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
5481 // CHECK8:       arraydestroy.body:
5482 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP37]], [[DOTOMP_LASTPRIVATE_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
5483 // CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
5484 // CHECK8-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
5485 // CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
5486 // CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
5487 // CHECK8:       arraydestroy.done13:
5488 // CHECK8-NEXT:    ret void
5489 //
5490 //
5491 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
5492 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5493 // CHECK8-NEXT:  entry:
5494 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5495 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5496 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5497 // CHECK8-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
5498 // CHECK8-NEXT:    ret void
5499 //
5500 //
5501 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
5502 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5503 // CHECK8-NEXT:  entry:
5504 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5505 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5506 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5507 // CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
5508 // CHECK8-NEXT:    store i32 0, i32* [[F]], align 4
5509 // CHECK8-NEXT:    ret void
5510 //
5511 //
5512 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
5513 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5514 // CHECK8-NEXT:  entry:
5515 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5516 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5517 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5518 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5519 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5520 // CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
5521 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
5522 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
5523 // CHECK8-NEXT:    ret void
5524 //
5525 //
5526 // CHECK8-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
5527 // CHECK8-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5528 // CHECK8-NEXT:  entry:
5529 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
5530 // CHECK8-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
5531 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
5532 // CHECK8-NEXT:    ret void
5533 //
5534 //
5535 // CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
5536 // CHECK8-SAME: () #[[ATTR6:[0-9]+]] {
5537 // CHECK8-NEXT:  entry:
5538 // CHECK8-NEXT:    call void @__tgt_register_requires(i64 1)
5539 // CHECK8-NEXT:    ret void
5540 //
5541 //
5542 // CHECK9-LABEL: define {{[^@]+}}@main
5543 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
5544 // CHECK9-NEXT:  entry:
5545 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5546 // CHECK9-NEXT:    [[G:%.*]] = alloca double, align 8
5547 // CHECK9-NEXT:    [[G1:%.*]] = alloca double*, align 8
5548 // CHECK9-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
5549 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5550 // CHECK9-NEXT:    store double* [[G]], double** [[G1]], align 8
5551 // CHECK9-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
5552 // CHECK9-NEXT:    store double* [[G]], double** [[TMP0]], align 8
5553 // CHECK9-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
5554 // CHECK9-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 8
5555 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 8
5556 // CHECK9-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(16) [[REF_TMP]])
5557 // CHECK9-NEXT:    ret i32 0
5558 //
5559 //
5560 // CHECK10-LABEL: define {{[^@]+}}@main
5561 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
5562 // CHECK10-NEXT:  entry:
5563 // CHECK10-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5564 // CHECK10-NEXT:    [[G:%.*]] = alloca double, align 8
5565 // CHECK10-NEXT:    [[G1:%.*]] = alloca double*, align 8
5566 // CHECK10-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
5567 // CHECK10-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5568 // CHECK10-NEXT:    store double* [[G]], double** [[G1]], align 8
5569 // CHECK10-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
5570 // CHECK10-NEXT:    store double* [[G]], double** [[TMP0]], align 8
5571 // CHECK10-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
5572 // CHECK10-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 8
5573 // CHECK10-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 8
5574 // CHECK10-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 8 dereferenceable(16) [[REF_TMP]])
5575 // CHECK10-NEXT:    ret i32 0
5576 //
5577 //
5578 // CHECK11-LABEL: define {{[^@]+}}@main
5579 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
5580 // CHECK11-NEXT:  entry:
5581 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5582 // CHECK11-NEXT:    [[G:%.*]] = alloca double, align 8
5583 // CHECK11-NEXT:    [[G1:%.*]] = alloca double*, align 4
5584 // CHECK11-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
5585 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5586 // CHECK11-NEXT:    store double* [[G]], double** [[G1]], align 4
5587 // CHECK11-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
5588 // CHECK11-NEXT:    store double* [[G]], double** [[TMP0]], align 4
5589 // CHECK11-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
5590 // CHECK11-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 4
5591 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 4
5592 // CHECK11-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(8) [[REF_TMP]])
5593 // CHECK11-NEXT:    ret i32 0
5594 //
5595 //
5596 // CHECK12-LABEL: define {{[^@]+}}@main
5597 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
5598 // CHECK12-NEXT:  entry:
5599 // CHECK12-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5600 // CHECK12-NEXT:    [[G:%.*]] = alloca double, align 8
5601 // CHECK12-NEXT:    [[G1:%.*]] = alloca double*, align 4
5602 // CHECK12-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
5603 // CHECK12-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5604 // CHECK12-NEXT:    store double* [[G]], double** [[G1]], align 4
5605 // CHECK12-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
5606 // CHECK12-NEXT:    store double* [[G]], double** [[TMP0]], align 4
5607 // CHECK12-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
5608 // CHECK12-NEXT:    [[TMP2:%.*]] = load double*, double** [[G1]], align 4
5609 // CHECK12-NEXT:    store double* [[TMP2]], double** [[TMP1]], align 4
5610 // CHECK12-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 4 dereferenceable(8) [[REF_TMP]])
5611 // CHECK12-NEXT:    ret i32 0
5612 //
5613 //
5614 // CHECK13-LABEL: define {{[^@]+}}@main
5615 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
5616 // CHECK13-NEXT:  entry:
5617 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5618 // CHECK13-NEXT:    [[G:%.*]] = alloca double, align 8
5619 // CHECK13-NEXT:    [[G1:%.*]] = alloca double*, align 8
5620 // CHECK13-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
5621 // CHECK13-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
5622 // CHECK13-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
5623 // CHECK13-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
5624 // CHECK13-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 8
5625 // CHECK13-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
5626 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5627 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5628 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5629 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5630 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
5631 // CHECK13-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
5632 // CHECK13-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
5633 // CHECK13-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
5634 // CHECK13-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S]], align 4
5635 // CHECK13-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 8
5636 // CHECK13-NEXT:    [[SVAR:%.*]] = alloca i32, align 4
5637 // CHECK13-NEXT:    [[I14:%.*]] = alloca i32, align 4
5638 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5639 // CHECK13-NEXT:    store double* [[G]], double** [[G1]], align 8
5640 // CHECK13-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
5641 // CHECK13-NEXT:    store i32 0, i32* [[T_VAR]], align 4
5642 // CHECK13-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
5643 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
5644 // CHECK13-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
5645 // CHECK13-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
5646 // CHECK13-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
5647 // CHECK13-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
5648 // CHECK13-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 8
5649 // CHECK13-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
5650 // CHECK13-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 8
5651 // CHECK13-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
5652 // CHECK13-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
5653 // CHECK13-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
5654 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5655 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
5656 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5657 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5658 // CHECK13-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
5659 // CHECK13-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
5660 // CHECK13-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
5661 // CHECK13:       arrayctor.loop:
5662 // CHECK13-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
5663 // CHECK13-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
5664 // CHECK13-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
5665 // CHECK13-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
5666 // CHECK13-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
5667 // CHECK13:       arrayctor.cont:
5668 // CHECK13-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
5669 // CHECK13-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
5670 // CHECK13-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 8
5671 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5672 // CHECK13:       omp.inner.for.cond:
5673 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
5674 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
5675 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5676 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
5677 // CHECK13:       omp.inner.for.cond.cleanup:
5678 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
5679 // CHECK13:       omp.inner.for.body:
5680 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
5681 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5682 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5683 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
5684 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !2
5685 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
5686 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
5687 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i64 0, i64 [[IDXPROM]]
5688 // CHECK13-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
5689 // CHECK13-NEXT:    [[TMP12:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 8, !llvm.access.group !2
5690 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
5691 // CHECK13-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP13]] to i64
5692 // CHECK13-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i64 0, i64 [[IDXPROM7]]
5693 // CHECK13-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX8]] to i8*
5694 // CHECK13-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP12]] to i8*
5695 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false), !llvm.access.group !2
5696 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5697 // CHECK13:       omp.body.continue:
5698 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5699 // CHECK13:       omp.inner.for.inc:
5700 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
5701 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
5702 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
5703 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
5704 // CHECK13:       omp.inner.for.end:
5705 // CHECK13-NEXT:    store i32 2, i32* [[I]], align 4
5706 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
5707 // CHECK13-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
5708 // CHECK13-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
5709 // CHECK13-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
5710 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 8, i1 false)
5711 // CHECK13-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
5712 // CHECK13-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
5713 // CHECK13-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN10]], i64 2
5714 // CHECK13-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN10]], [[TMP21]]
5715 // CHECK13-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5716 // CHECK13:       omp.arraycpy.body:
5717 // CHECK13-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5718 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN10]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5719 // CHECK13-NEXT:    [[TMP22:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
5720 // CHECK13-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
5721 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i64 4, i1 false)
5722 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5723 // CHECK13-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5724 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
5725 // CHECK13-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
5726 // CHECK13:       omp.arraycpy.done11:
5727 // CHECK13-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 8
5728 // CHECK13-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
5729 // CHECK13-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[TMP24]] to i8*
5730 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i64 4, i1 false)
5731 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR]], align 4
5732 // CHECK13-NEXT:    store i32 [[TMP27]], i32* @_ZZ4mainE4svar, align 4
5733 // CHECK13-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]]
5734 // CHECK13-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
5735 // CHECK13-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN12]], i64 2
5736 // CHECK13-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
5737 // CHECK13:       arraydestroy.body:
5738 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP28]], [[OMP_ARRAYCPY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
5739 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
5740 // CHECK13-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
5741 // CHECK13-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
5742 // CHECK13-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
5743 // CHECK13:       arraydestroy.done13:
5744 // CHECK13-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
5745 // CHECK13-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
5746 // CHECK13-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
5747 // CHECK13-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN15]], i64 2
5748 // CHECK13-NEXT:    br label [[ARRAYDESTROY_BODY16:%.*]]
5749 // CHECK13:       arraydestroy.body16:
5750 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENTPAST17:%.*]] = phi %struct.S* [ [[TMP29]], [[ARRAYDESTROY_DONE13]] ], [ [[ARRAYDESTROY_ELEMENT18:%.*]], [[ARRAYDESTROY_BODY16]] ]
5751 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENT18]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST17]], i64 -1
5752 // CHECK13-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT18]]) #[[ATTR4]]
5753 // CHECK13-NEXT:    [[ARRAYDESTROY_DONE19:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT18]], [[ARRAY_BEGIN15]]
5754 // CHECK13-NEXT:    br i1 [[ARRAYDESTROY_DONE19]], label [[ARRAYDESTROY_DONE20:%.*]], label [[ARRAYDESTROY_BODY16]]
5755 // CHECK13:       arraydestroy.done20:
5756 // CHECK13-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
5757 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
5758 // CHECK13-NEXT:    ret i32 [[TMP30]]
5759 //
5760 //
5761 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
5762 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
5763 // CHECK13-NEXT:  entry:
5764 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5765 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5766 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5767 // CHECK13-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
5768 // CHECK13-NEXT:    ret void
5769 //
5770 //
5771 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
5772 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5773 // CHECK13-NEXT:  entry:
5774 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5775 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
5776 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5777 // CHECK13-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
5778 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5779 // CHECK13-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
5780 // CHECK13-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
5781 // CHECK13-NEXT:    ret void
5782 //
5783 //
5784 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
5785 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5786 // CHECK13-NEXT:  entry:
5787 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5788 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5789 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5790 // CHECK13-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
5791 // CHECK13-NEXT:    ret void
5792 //
5793 //
5794 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
5795 // CHECK13-SAME: () #[[ATTR3:[0-9]+]] comdat {
5796 // CHECK13-NEXT:  entry:
5797 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
5798 // CHECK13-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
5799 // CHECK13-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
5800 // CHECK13-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
5801 // CHECK13-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
5802 // CHECK13-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
5803 // CHECK13-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
5804 // CHECK13-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
5805 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5806 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5807 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5808 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
5809 // CHECK13-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
5810 // CHECK13-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
5811 // CHECK13-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
5812 // CHECK13-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0]], align 4
5813 // CHECK13-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 8
5814 // CHECK13-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
5815 // CHECK13-NEXT:    store i32 0, i32* [[T_VAR]], align 4
5816 // CHECK13-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
5817 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
5818 // CHECK13-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
5819 // CHECK13-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
5820 // CHECK13-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
5821 // CHECK13-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
5822 // CHECK13-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
5823 // CHECK13-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
5824 // CHECK13-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8
5825 // CHECK13-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
5826 // CHECK13-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
5827 // CHECK13-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
5828 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5829 // CHECK13-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
5830 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5831 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5832 // CHECK13-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5833 // CHECK13-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
5834 // CHECK13-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
5835 // CHECK13:       arrayctor.loop:
5836 // CHECK13-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
5837 // CHECK13-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
5838 // CHECK13-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
5839 // CHECK13-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
5840 // CHECK13-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
5841 // CHECK13:       arrayctor.cont:
5842 // CHECK13-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
5843 // CHECK13-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
5844 // CHECK13-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 8
5845 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5846 // CHECK13:       omp.inner.for.cond:
5847 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
5848 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
5849 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5850 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
5851 // CHECK13:       omp.inner.for.cond.cleanup:
5852 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
5853 // CHECK13:       omp.inner.for.body:
5854 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
5855 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5856 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5857 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
5858 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !6
5859 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
5860 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
5861 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i64 0, i64 [[IDXPROM]]
5862 // CHECK13-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6
5863 // CHECK13-NEXT:    [[TMP12:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8, !llvm.access.group !6
5864 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
5865 // CHECK13-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP13]] to i64
5866 // CHECK13-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i64 0, i64 [[IDXPROM7]]
5867 // CHECK13-NEXT:    [[TMP14:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
5868 // CHECK13-NEXT:    [[TMP15:%.*]] = bitcast %struct.S.0* [[TMP12]] to i8*
5869 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false), !llvm.access.group !6
5870 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5871 // CHECK13:       omp.body.continue:
5872 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5873 // CHECK13:       omp.inner.for.inc:
5874 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
5875 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
5876 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
5877 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
5878 // CHECK13:       omp.inner.for.end:
5879 // CHECK13-NEXT:    store i32 2, i32* [[I]], align 4
5880 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
5881 // CHECK13-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
5882 // CHECK13-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
5883 // CHECK13-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
5884 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 8, i1 false)
5885 // CHECK13-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
5886 // CHECK13-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
5887 // CHECK13-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i64 2
5888 // CHECK13-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN10]], [[TMP21]]
5889 // CHECK13-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
5890 // CHECK13:       omp.arraycpy.body:
5891 // CHECK13-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5892 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN10]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
5893 // CHECK13-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
5894 // CHECK13-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
5895 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i64 4, i1 false)
5896 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
5897 // CHECK13-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
5898 // CHECK13-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
5899 // CHECK13-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
5900 // CHECK13:       omp.arraycpy.done11:
5901 // CHECK13-NEXT:    [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
5902 // CHECK13-NEXT:    [[TMP25:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
5903 // CHECK13-NEXT:    [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
5904 // CHECK13-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i64 4, i1 false)
5905 // CHECK13-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
5906 // CHECK13-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
5907 // CHECK13-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN12]], i64 2
5908 // CHECK13-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
5909 // CHECK13:       arraydestroy.body:
5910 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[OMP_ARRAYCPY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
5911 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
5912 // CHECK13-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
5913 // CHECK13-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
5914 // CHECK13-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
5915 // CHECK13:       arraydestroy.done13:
5916 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
5917 // CHECK13-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
5918 // CHECK13-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2
5919 // CHECK13-NEXT:    br label [[ARRAYDESTROY_BODY15:%.*]]
5920 // CHECK13:       arraydestroy.body15:
5921 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENTPAST16:%.*]] = phi %struct.S.0* [ [[TMP28]], [[ARRAYDESTROY_DONE13]] ], [ [[ARRAYDESTROY_ELEMENT17:%.*]], [[ARRAYDESTROY_BODY15]] ]
5922 // CHECK13-NEXT:    [[ARRAYDESTROY_ELEMENT17]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST16]], i64 -1
5923 // CHECK13-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT17]]) #[[ATTR4]]
5924 // CHECK13-NEXT:    [[ARRAYDESTROY_DONE18:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT17]], [[ARRAY_BEGIN14]]
5925 // CHECK13-NEXT:    br i1 [[ARRAYDESTROY_DONE18]], label [[ARRAYDESTROY_DONE19:%.*]], label [[ARRAYDESTROY_BODY15]]
5926 // CHECK13:       arraydestroy.done19:
5927 // CHECK13-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
5928 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[RETVAL]], align 4
5929 // CHECK13-NEXT:    ret i32 [[TMP29]]
5930 //
5931 //
5932 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
5933 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5934 // CHECK13-NEXT:  entry:
5935 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5936 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5937 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5938 // CHECK13-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
5939 // CHECK13-NEXT:    store float 0.000000e+00, float* [[F]], align 4
5940 // CHECK13-NEXT:    ret void
5941 //
5942 //
5943 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
5944 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5945 // CHECK13-NEXT:  entry:
5946 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5947 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5948 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5949 // CHECK13-NEXT:    ret void
5950 //
5951 //
5952 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
5953 // CHECK13-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5954 // CHECK13-NEXT:  entry:
5955 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
5956 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
5957 // CHECK13-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
5958 // CHECK13-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
5959 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
5960 // CHECK13-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
5961 // CHECK13-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
5962 // CHECK13-NEXT:    store float [[TMP0]], float* [[F]], align 4
5963 // CHECK13-NEXT:    ret void
5964 //
5965 //
5966 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
5967 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5968 // CHECK13-NEXT:  entry:
5969 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
5970 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
5971 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
5972 // CHECK13-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
5973 // CHECK13-NEXT:    ret void
5974 //
5975 //
5976 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
5977 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5978 // CHECK13-NEXT:  entry:
5979 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
5980 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5981 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
5982 // CHECK13-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5983 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
5984 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
5985 // CHECK13-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
5986 // CHECK13-NEXT:    ret void
5987 //
5988 //
5989 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
5990 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
5991 // CHECK13-NEXT:  entry:
5992 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
5993 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
5994 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
5995 // CHECK13-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
5996 // CHECK13-NEXT:    ret void
5997 //
5998 //
5999 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
6000 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6001 // CHECK13-NEXT:  entry:
6002 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6003 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6004 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6005 // CHECK13-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6006 // CHECK13-NEXT:    store i32 0, i32* [[F]], align 4
6007 // CHECK13-NEXT:    ret void
6008 //
6009 //
6010 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
6011 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6012 // CHECK13-NEXT:  entry:
6013 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6014 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6015 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6016 // CHECK13-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6017 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6018 // CHECK13-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6019 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6020 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
6021 // CHECK13-NEXT:    ret void
6022 //
6023 //
6024 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
6025 // CHECK13-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6026 // CHECK13-NEXT:  entry:
6027 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6028 // CHECK13-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6029 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6030 // CHECK13-NEXT:    ret void
6031 //
6032 //
6033 // CHECK14-LABEL: define {{[^@]+}}@main
6034 // CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
6035 // CHECK14-NEXT:  entry:
6036 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6037 // CHECK14-NEXT:    [[G:%.*]] = alloca double, align 8
6038 // CHECK14-NEXT:    [[G1:%.*]] = alloca double*, align 8
6039 // CHECK14-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
6040 // CHECK14-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
6041 // CHECK14-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
6042 // CHECK14-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
6043 // CHECK14-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 8
6044 // CHECK14-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 8
6045 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6046 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6047 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6048 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6049 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
6050 // CHECK14-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
6051 // CHECK14-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
6052 // CHECK14-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
6053 // CHECK14-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S]], align 4
6054 // CHECK14-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 8
6055 // CHECK14-NEXT:    [[SVAR:%.*]] = alloca i32, align 4
6056 // CHECK14-NEXT:    [[I14:%.*]] = alloca i32, align 4
6057 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6058 // CHECK14-NEXT:    store double* [[G]], double** [[G1]], align 8
6059 // CHECK14-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
6060 // CHECK14-NEXT:    store i32 0, i32* [[T_VAR]], align 4
6061 // CHECK14-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6062 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
6063 // CHECK14-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
6064 // CHECK14-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
6065 // CHECK14-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
6066 // CHECK14-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
6067 // CHECK14-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 8
6068 // CHECK14-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
6069 // CHECK14-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 8
6070 // CHECK14-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
6071 // CHECK14-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
6072 // CHECK14-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 8
6073 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6074 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
6075 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6076 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6077 // CHECK14-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6078 // CHECK14-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
6079 // CHECK14-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
6080 // CHECK14:       arrayctor.loop:
6081 // CHECK14-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
6082 // CHECK14-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
6083 // CHECK14-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
6084 // CHECK14-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
6085 // CHECK14-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
6086 // CHECK14:       arrayctor.cont:
6087 // CHECK14-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 8
6088 // CHECK14-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
6089 // CHECK14-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 8
6090 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6091 // CHECK14:       omp.inner.for.cond:
6092 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
6093 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
6094 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6095 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
6096 // CHECK14:       omp.inner.for.cond.cleanup:
6097 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
6098 // CHECK14:       omp.inner.for.body:
6099 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
6100 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6101 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6102 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
6103 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !2
6104 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
6105 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
6106 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i64 0, i64 [[IDXPROM]]
6107 // CHECK14-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !2
6108 // CHECK14-NEXT:    [[TMP12:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 8, !llvm.access.group !2
6109 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
6110 // CHECK14-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP13]] to i64
6111 // CHECK14-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i64 0, i64 [[IDXPROM7]]
6112 // CHECK14-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX8]] to i8*
6113 // CHECK14-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP12]] to i8*
6114 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false), !llvm.access.group !2
6115 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6116 // CHECK14:       omp.body.continue:
6117 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6118 // CHECK14:       omp.inner.for.inc:
6119 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
6120 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
6121 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
6122 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
6123 // CHECK14:       omp.inner.for.end:
6124 // CHECK14-NEXT:    store i32 2, i32* [[I]], align 4
6125 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
6126 // CHECK14-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
6127 // CHECK14-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6128 // CHECK14-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
6129 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 8, i1 false)
6130 // CHECK14-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6131 // CHECK14-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
6132 // CHECK14-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN10]], i64 2
6133 // CHECK14-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN10]], [[TMP21]]
6134 // CHECK14-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6135 // CHECK14:       omp.arraycpy.body:
6136 // CHECK14-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6137 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN10]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6138 // CHECK14-NEXT:    [[TMP22:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
6139 // CHECK14-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
6140 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i64 4, i1 false)
6141 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6142 // CHECK14-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6143 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
6144 // CHECK14-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
6145 // CHECK14:       omp.arraycpy.done11:
6146 // CHECK14-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 8
6147 // CHECK14-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
6148 // CHECK14-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[TMP24]] to i8*
6149 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i64 4, i1 false)
6150 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR]], align 4
6151 // CHECK14-NEXT:    store i32 [[TMP27]], i32* @_ZZ4mainE4svar, align 4
6152 // CHECK14-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]]
6153 // CHECK14-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6154 // CHECK14-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN12]], i64 2
6155 // CHECK14-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
6156 // CHECK14:       arraydestroy.body:
6157 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP28]], [[OMP_ARRAYCPY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
6158 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
6159 // CHECK14-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
6160 // CHECK14-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
6161 // CHECK14-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
6162 // CHECK14:       arraydestroy.done13:
6163 // CHECK14-NEXT:    [[CALL:%.*]] = call signext i32 @_Z5tmainIiET_v()
6164 // CHECK14-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
6165 // CHECK14-NEXT:    [[ARRAY_BEGIN15:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6166 // CHECK14-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN15]], i64 2
6167 // CHECK14-NEXT:    br label [[ARRAYDESTROY_BODY16:%.*]]
6168 // CHECK14:       arraydestroy.body16:
6169 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENTPAST17:%.*]] = phi %struct.S* [ [[TMP29]], [[ARRAYDESTROY_DONE13]] ], [ [[ARRAYDESTROY_ELEMENT18:%.*]], [[ARRAYDESTROY_BODY16]] ]
6170 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENT18]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST17]], i64 -1
6171 // CHECK14-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT18]]) #[[ATTR4]]
6172 // CHECK14-NEXT:    [[ARRAYDESTROY_DONE19:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT18]], [[ARRAY_BEGIN15]]
6173 // CHECK14-NEXT:    br i1 [[ARRAYDESTROY_DONE19]], label [[ARRAYDESTROY_DONE20:%.*]], label [[ARRAYDESTROY_BODY16]]
6174 // CHECK14:       arraydestroy.done20:
6175 // CHECK14-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
6176 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
6177 // CHECK14-NEXT:    ret i32 [[TMP30]]
6178 //
6179 //
6180 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
6181 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
6182 // CHECK14-NEXT:  entry:
6183 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6184 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6185 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6186 // CHECK14-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
6187 // CHECK14-NEXT:    ret void
6188 //
6189 //
6190 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
6191 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6192 // CHECK14-NEXT:  entry:
6193 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6194 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
6195 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6196 // CHECK14-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
6197 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6198 // CHECK14-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
6199 // CHECK14-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
6200 // CHECK14-NEXT:    ret void
6201 //
6202 //
6203 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
6204 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6205 // CHECK14-NEXT:  entry:
6206 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6207 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6208 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6209 // CHECK14-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
6210 // CHECK14-NEXT:    ret void
6211 //
6212 //
6213 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
6214 // CHECK14-SAME: () #[[ATTR3:[0-9]+]] comdat {
6215 // CHECK14-NEXT:  entry:
6216 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6217 // CHECK14-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
6218 // CHECK14-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
6219 // CHECK14-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
6220 // CHECK14-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
6221 // CHECK14-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 8
6222 // CHECK14-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 8
6223 // CHECK14-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6224 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6225 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6226 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6227 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
6228 // CHECK14-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
6229 // CHECK14-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
6230 // CHECK14-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
6231 // CHECK14-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0]], align 4
6232 // CHECK14-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 8
6233 // CHECK14-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
6234 // CHECK14-NEXT:    store i32 0, i32* [[T_VAR]], align 4
6235 // CHECK14-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6236 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
6237 // CHECK14-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
6238 // CHECK14-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 signext 1)
6239 // CHECK14-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
6240 // CHECK14-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 signext 2)
6241 // CHECK14-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
6242 // CHECK14-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
6243 // CHECK14-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8
6244 // CHECK14-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
6245 // CHECK14-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
6246 // CHECK14-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
6247 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6248 // CHECK14-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
6249 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6250 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6251 // CHECK14-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
6252 // CHECK14-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
6253 // CHECK14-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
6254 // CHECK14:       arrayctor.loop:
6255 // CHECK14-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
6256 // CHECK14-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
6257 // CHECK14-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
6258 // CHECK14-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
6259 // CHECK14-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
6260 // CHECK14:       arrayctor.cont:
6261 // CHECK14-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
6262 // CHECK14-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
6263 // CHECK14-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 8
6264 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6265 // CHECK14:       omp.inner.for.cond:
6266 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
6267 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
6268 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6269 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
6270 // CHECK14:       omp.inner.for.cond.cleanup:
6271 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
6272 // CHECK14:       omp.inner.for.body:
6273 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
6274 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6275 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6276 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
6277 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !6
6278 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
6279 // CHECK14-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
6280 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i64 0, i64 [[IDXPROM]]
6281 // CHECK14-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6
6282 // CHECK14-NEXT:    [[TMP12:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8, !llvm.access.group !6
6283 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
6284 // CHECK14-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP13]] to i64
6285 // CHECK14-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i64 0, i64 [[IDXPROM7]]
6286 // CHECK14-NEXT:    [[TMP14:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
6287 // CHECK14-NEXT:    [[TMP15:%.*]] = bitcast %struct.S.0* [[TMP12]] to i8*
6288 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false), !llvm.access.group !6
6289 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6290 // CHECK14:       omp.body.continue:
6291 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6292 // CHECK14:       omp.inner.for.inc:
6293 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
6294 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
6295 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
6296 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
6297 // CHECK14:       omp.inner.for.end:
6298 // CHECK14-NEXT:    store i32 2, i32* [[I]], align 4
6299 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
6300 // CHECK14-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
6301 // CHECK14-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6302 // CHECK14-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
6303 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 8, i1 false)
6304 // CHECK14-NEXT:    [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
6305 // CHECK14-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
6306 // CHECK14-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN10]], i64 2
6307 // CHECK14-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN10]], [[TMP21]]
6308 // CHECK14-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE11:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6309 // CHECK14:       omp.arraycpy.body:
6310 // CHECK14-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6311 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN10]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6312 // CHECK14-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
6313 // CHECK14-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
6314 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i64 4, i1 false)
6315 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6316 // CHECK14-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6317 // CHECK14-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
6318 // CHECK14-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE11]], label [[OMP_ARRAYCPY_BODY]]
6319 // CHECK14:       omp.arraycpy.done11:
6320 // CHECK14-NEXT:    [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 8
6321 // CHECK14-NEXT:    [[TMP25:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
6322 // CHECK14-NEXT:    [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
6323 // CHECK14-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i64 4, i1 false)
6324 // CHECK14-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
6325 // CHECK14-NEXT:    [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
6326 // CHECK14-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN12]], i64 2
6327 // CHECK14-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
6328 // CHECK14:       arraydestroy.body:
6329 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[OMP_ARRAYCPY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
6330 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
6331 // CHECK14-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
6332 // CHECK14-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN12]]
6333 // CHECK14-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE13:%.*]], label [[ARRAYDESTROY_BODY]]
6334 // CHECK14:       arraydestroy.done13:
6335 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6336 // CHECK14-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
6337 // CHECK14-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2
6338 // CHECK14-NEXT:    br label [[ARRAYDESTROY_BODY15:%.*]]
6339 // CHECK14:       arraydestroy.body15:
6340 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENTPAST16:%.*]] = phi %struct.S.0* [ [[TMP28]], [[ARRAYDESTROY_DONE13]] ], [ [[ARRAYDESTROY_ELEMENT17:%.*]], [[ARRAYDESTROY_BODY15]] ]
6341 // CHECK14-NEXT:    [[ARRAYDESTROY_ELEMENT17]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST16]], i64 -1
6342 // CHECK14-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT17]]) #[[ATTR4]]
6343 // CHECK14-NEXT:    [[ARRAYDESTROY_DONE18:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT17]], [[ARRAY_BEGIN14]]
6344 // CHECK14-NEXT:    br i1 [[ARRAYDESTROY_DONE18]], label [[ARRAYDESTROY_DONE19:%.*]], label [[ARRAYDESTROY_BODY15]]
6345 // CHECK14:       arraydestroy.done19:
6346 // CHECK14-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
6347 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[RETVAL]], align 4
6348 // CHECK14-NEXT:    ret i32 [[TMP29]]
6349 //
6350 //
6351 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
6352 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6353 // CHECK14-NEXT:  entry:
6354 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6355 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6356 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6357 // CHECK14-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
6358 // CHECK14-NEXT:    store float 0.000000e+00, float* [[F]], align 4
6359 // CHECK14-NEXT:    ret void
6360 //
6361 //
6362 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
6363 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6364 // CHECK14-NEXT:  entry:
6365 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6366 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6367 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6368 // CHECK14-NEXT:    ret void
6369 //
6370 //
6371 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
6372 // CHECK14-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6373 // CHECK14-NEXT:  entry:
6374 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
6375 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
6376 // CHECK14-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
6377 // CHECK14-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
6378 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
6379 // CHECK14-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
6380 // CHECK14-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
6381 // CHECK14-NEXT:    store float [[TMP0]], float* [[F]], align 4
6382 // CHECK14-NEXT:    ret void
6383 //
6384 //
6385 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
6386 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6387 // CHECK14-NEXT:  entry:
6388 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6389 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6390 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6391 // CHECK14-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
6392 // CHECK14-NEXT:    ret void
6393 //
6394 //
6395 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
6396 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6397 // CHECK14-NEXT:  entry:
6398 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6399 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6400 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6401 // CHECK14-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6402 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6403 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6404 // CHECK14-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 signext [[TMP0]])
6405 // CHECK14-NEXT:    ret void
6406 //
6407 //
6408 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
6409 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6410 // CHECK14-NEXT:  entry:
6411 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6412 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6413 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6414 // CHECK14-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
6415 // CHECK14-NEXT:    ret void
6416 //
6417 //
6418 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
6419 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6420 // CHECK14-NEXT:  entry:
6421 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6422 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6423 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6424 // CHECK14-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6425 // CHECK14-NEXT:    store i32 0, i32* [[F]], align 4
6426 // CHECK14-NEXT:    ret void
6427 //
6428 //
6429 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
6430 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6431 // CHECK14-NEXT:  entry:
6432 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6433 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6434 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6435 // CHECK14-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6436 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6437 // CHECK14-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6438 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6439 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
6440 // CHECK14-NEXT:    ret void
6441 //
6442 //
6443 // CHECK14-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
6444 // CHECK14-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6445 // CHECK14-NEXT:  entry:
6446 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
6447 // CHECK14-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
6448 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
6449 // CHECK14-NEXT:    ret void
6450 //
6451 //
6452 // CHECK15-LABEL: define {{[^@]+}}@main
6453 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
6454 // CHECK15-NEXT:  entry:
6455 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6456 // CHECK15-NEXT:    [[G:%.*]] = alloca double, align 8
6457 // CHECK15-NEXT:    [[G1:%.*]] = alloca double*, align 4
6458 // CHECK15-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
6459 // CHECK15-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
6460 // CHECK15-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
6461 // CHECK15-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
6462 // CHECK15-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 4
6463 // CHECK15-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
6464 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6465 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6466 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6467 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6468 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
6469 // CHECK15-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
6470 // CHECK15-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
6471 // CHECK15-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
6472 // CHECK15-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S]], align 4
6473 // CHECK15-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
6474 // CHECK15-NEXT:    [[SVAR:%.*]] = alloca i32, align 4
6475 // CHECK15-NEXT:    [[I13:%.*]] = alloca i32, align 4
6476 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6477 // CHECK15-NEXT:    store double* [[G]], double** [[G1]], align 4
6478 // CHECK15-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
6479 // CHECK15-NEXT:    store i32 0, i32* [[T_VAR]], align 4
6480 // CHECK15-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6481 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i32 8, i1 false)
6482 // CHECK15-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6483 // CHECK15-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
6484 // CHECK15-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i32 1
6485 // CHECK15-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
6486 // CHECK15-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 4
6487 // CHECK15-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6488 // CHECK15-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 4
6489 // CHECK15-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6490 // CHECK15-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6491 // CHECK15-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6492 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6493 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
6494 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6495 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6496 // CHECK15-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6497 // CHECK15-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
6498 // CHECK15-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
6499 // CHECK15:       arrayctor.loop:
6500 // CHECK15-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
6501 // CHECK15-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
6502 // CHECK15-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
6503 // CHECK15-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
6504 // CHECK15-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
6505 // CHECK15:       arrayctor.cont:
6506 // CHECK15-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
6507 // CHECK15-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
6508 // CHECK15-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
6509 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6510 // CHECK15:       omp.inner.for.cond:
6511 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6512 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
6513 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6514 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
6515 // CHECK15:       omp.inner.for.cond.cleanup:
6516 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
6517 // CHECK15:       omp.inner.for.body:
6518 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6519 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6520 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6521 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
6522 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !3
6523 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
6524 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP11]]
6525 // CHECK15-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
6526 // CHECK15-NEXT:    [[TMP12:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4, !llvm.access.group !3
6527 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
6528 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 [[TMP13]]
6529 // CHECK15-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX7]] to i8*
6530 // CHECK15-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP12]] to i8*
6531 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i32 4, i1 false), !llvm.access.group !3
6532 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6533 // CHECK15:       omp.body.continue:
6534 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6535 // CHECK15:       omp.inner.for.inc:
6536 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6537 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
6538 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6539 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
6540 // CHECK15:       omp.inner.for.end:
6541 // CHECK15-NEXT:    store i32 2, i32* [[I]], align 4
6542 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
6543 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
6544 // CHECK15-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6545 // CHECK15-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
6546 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 8, i1 false)
6547 // CHECK15-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6548 // CHECK15-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
6549 // CHECK15-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN9]], i32 2
6550 // CHECK15-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN9]], [[TMP21]]
6551 // CHECK15-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6552 // CHECK15:       omp.arraycpy.body:
6553 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6554 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN9]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6555 // CHECK15-NEXT:    [[TMP22:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
6556 // CHECK15-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
6557 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i32 4, i1 false)
6558 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6559 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6560 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
6561 // CHECK15-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
6562 // CHECK15:       omp.arraycpy.done10:
6563 // CHECK15-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
6564 // CHECK15-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
6565 // CHECK15-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[TMP24]] to i8*
6566 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 4, i1 false)
6567 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR]], align 4
6568 // CHECK15-NEXT:    store i32 [[TMP27]], i32* @_ZZ4mainE4svar, align 4
6569 // CHECK15-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]]
6570 // CHECK15-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6571 // CHECK15-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
6572 // CHECK15-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
6573 // CHECK15:       arraydestroy.body:
6574 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP28]], [[OMP_ARRAYCPY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
6575 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
6576 // CHECK15-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
6577 // CHECK15-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
6578 // CHECK15-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
6579 // CHECK15:       arraydestroy.done12:
6580 // CHECK15-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
6581 // CHECK15-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
6582 // CHECK15-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6583 // CHECK15-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN14]], i32 2
6584 // CHECK15-NEXT:    br label [[ARRAYDESTROY_BODY15:%.*]]
6585 // CHECK15:       arraydestroy.body15:
6586 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENTPAST16:%.*]] = phi %struct.S* [ [[TMP29]], [[ARRAYDESTROY_DONE12]] ], [ [[ARRAYDESTROY_ELEMENT17:%.*]], [[ARRAYDESTROY_BODY15]] ]
6587 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENT17]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST16]], i32 -1
6588 // CHECK15-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT17]]) #[[ATTR4]]
6589 // CHECK15-NEXT:    [[ARRAYDESTROY_DONE18:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT17]], [[ARRAY_BEGIN14]]
6590 // CHECK15-NEXT:    br i1 [[ARRAYDESTROY_DONE18]], label [[ARRAYDESTROY_DONE19:%.*]], label [[ARRAYDESTROY_BODY15]]
6591 // CHECK15:       arraydestroy.done19:
6592 // CHECK15-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
6593 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
6594 // CHECK15-NEXT:    ret i32 [[TMP30]]
6595 //
6596 //
6597 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
6598 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
6599 // CHECK15-NEXT:  entry:
6600 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6601 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6602 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6603 // CHECK15-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
6604 // CHECK15-NEXT:    ret void
6605 //
6606 //
6607 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
6608 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6609 // CHECK15-NEXT:  entry:
6610 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6611 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
6612 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6613 // CHECK15-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
6614 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6615 // CHECK15-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
6616 // CHECK15-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
6617 // CHECK15-NEXT:    ret void
6618 //
6619 //
6620 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
6621 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6622 // CHECK15-NEXT:  entry:
6623 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6624 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6625 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6626 // CHECK15-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
6627 // CHECK15-NEXT:    ret void
6628 //
6629 //
6630 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
6631 // CHECK15-SAME: () #[[ATTR3:[0-9]+]] comdat {
6632 // CHECK15-NEXT:  entry:
6633 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6634 // CHECK15-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
6635 // CHECK15-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
6636 // CHECK15-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
6637 // CHECK15-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
6638 // CHECK15-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
6639 // CHECK15-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
6640 // CHECK15-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6641 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6642 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6643 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6644 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
6645 // CHECK15-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
6646 // CHECK15-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
6647 // CHECK15-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
6648 // CHECK15-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0]], align 4
6649 // CHECK15-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
6650 // CHECK15-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
6651 // CHECK15-NEXT:    store i32 0, i32* [[T_VAR]], align 4
6652 // CHECK15-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6653 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
6654 // CHECK15-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
6655 // CHECK15-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
6656 // CHECK15-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
6657 // CHECK15-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
6658 // CHECK15-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
6659 // CHECK15-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
6660 // CHECK15-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 4
6661 // CHECK15-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
6662 // CHECK15-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
6663 // CHECK15-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
6664 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6665 // CHECK15-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
6666 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6667 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6668 // CHECK15-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
6669 // CHECK15-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
6670 // CHECK15-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
6671 // CHECK15:       arrayctor.loop:
6672 // CHECK15-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
6673 // CHECK15-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
6674 // CHECK15-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
6675 // CHECK15-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
6676 // CHECK15-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
6677 // CHECK15:       arrayctor.cont:
6678 // CHECK15-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
6679 // CHECK15-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
6680 // CHECK15-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
6681 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6682 // CHECK15:       omp.inner.for.cond:
6683 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
6684 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
6685 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6686 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
6687 // CHECK15:       omp.inner.for.cond.cleanup:
6688 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
6689 // CHECK15:       omp.inner.for.body:
6690 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
6691 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6692 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6693 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
6694 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !7
6695 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
6696 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP11]]
6697 // CHECK15-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !7
6698 // CHECK15-NEXT:    [[TMP12:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4, !llvm.access.group !7
6699 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
6700 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 [[TMP13]]
6701 // CHECK15-NEXT:    [[TMP14:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
6702 // CHECK15-NEXT:    [[TMP15:%.*]] = bitcast %struct.S.0* [[TMP12]] to i8*
6703 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i32 4, i1 false), !llvm.access.group !7
6704 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6705 // CHECK15:       omp.body.continue:
6706 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6707 // CHECK15:       omp.inner.for.inc:
6708 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
6709 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
6710 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
6711 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
6712 // CHECK15:       omp.inner.for.end:
6713 // CHECK15-NEXT:    store i32 2, i32* [[I]], align 4
6714 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
6715 // CHECK15-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
6716 // CHECK15-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6717 // CHECK15-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
6718 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 8, i1 false)
6719 // CHECK15-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
6720 // CHECK15-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
6721 // CHECK15-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i32 2
6722 // CHECK15-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN9]], [[TMP21]]
6723 // CHECK15-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6724 // CHECK15:       omp.arraycpy.body:
6725 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6726 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN9]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6727 // CHECK15-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
6728 // CHECK15-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
6729 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i32 4, i1 false)
6730 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6731 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6732 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
6733 // CHECK15-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
6734 // CHECK15:       omp.arraycpy.done10:
6735 // CHECK15-NEXT:    [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
6736 // CHECK15-NEXT:    [[TMP25:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
6737 // CHECK15-NEXT:    [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
6738 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 4, i1 false)
6739 // CHECK15-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
6740 // CHECK15-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
6741 // CHECK15-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN11]], i32 2
6742 // CHECK15-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
6743 // CHECK15:       arraydestroy.body:
6744 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[OMP_ARRAYCPY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
6745 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
6746 // CHECK15-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
6747 // CHECK15-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
6748 // CHECK15-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
6749 // CHECK15:       arraydestroy.done12:
6750 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6751 // CHECK15-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
6752 // CHECK15-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN13]], i32 2
6753 // CHECK15-NEXT:    br label [[ARRAYDESTROY_BODY14:%.*]]
6754 // CHECK15:       arraydestroy.body14:
6755 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENTPAST15:%.*]] = phi %struct.S.0* [ [[TMP28]], [[ARRAYDESTROY_DONE12]] ], [ [[ARRAYDESTROY_ELEMENT16:%.*]], [[ARRAYDESTROY_BODY14]] ]
6756 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENT16]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST15]], i32 -1
6757 // CHECK15-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT16]]) #[[ATTR4]]
6758 // CHECK15-NEXT:    [[ARRAYDESTROY_DONE17:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT16]], [[ARRAY_BEGIN13]]
6759 // CHECK15-NEXT:    br i1 [[ARRAYDESTROY_DONE17]], label [[ARRAYDESTROY_DONE18:%.*]], label [[ARRAYDESTROY_BODY14]]
6760 // CHECK15:       arraydestroy.done18:
6761 // CHECK15-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
6762 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32, i32* [[RETVAL]], align 4
6763 // CHECK15-NEXT:    ret i32 [[TMP29]]
6764 //
6765 //
6766 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
6767 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6768 // CHECK15-NEXT:  entry:
6769 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6770 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6771 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6772 // CHECK15-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
6773 // CHECK15-NEXT:    store float 0.000000e+00, float* [[F]], align 4
6774 // CHECK15-NEXT:    ret void
6775 //
6776 //
6777 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
6778 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6779 // CHECK15-NEXT:  entry:
6780 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6781 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6782 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6783 // CHECK15-NEXT:    ret void
6784 //
6785 //
6786 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
6787 // CHECK15-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6788 // CHECK15-NEXT:  entry:
6789 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
6790 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
6791 // CHECK15-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
6792 // CHECK15-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
6793 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
6794 // CHECK15-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
6795 // CHECK15-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
6796 // CHECK15-NEXT:    store float [[TMP0]], float* [[F]], align 4
6797 // CHECK15-NEXT:    ret void
6798 //
6799 //
6800 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
6801 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6802 // CHECK15-NEXT:  entry:
6803 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6804 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6805 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6806 // CHECK15-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
6807 // CHECK15-NEXT:    ret void
6808 //
6809 //
6810 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
6811 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6812 // CHECK15-NEXT:  entry:
6813 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6814 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6815 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6816 // CHECK15-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6817 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6818 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6819 // CHECK15-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
6820 // CHECK15-NEXT:    ret void
6821 //
6822 //
6823 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
6824 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6825 // CHECK15-NEXT:  entry:
6826 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6827 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6828 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6829 // CHECK15-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
6830 // CHECK15-NEXT:    ret void
6831 //
6832 //
6833 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
6834 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6835 // CHECK15-NEXT:  entry:
6836 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6837 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6838 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6839 // CHECK15-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6840 // CHECK15-NEXT:    store i32 0, i32* [[F]], align 4
6841 // CHECK15-NEXT:    ret void
6842 //
6843 //
6844 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
6845 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6846 // CHECK15-NEXT:  entry:
6847 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6848 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6849 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6850 // CHECK15-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6851 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6852 // CHECK15-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
6853 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6854 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
6855 // CHECK15-NEXT:    ret void
6856 //
6857 //
6858 // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
6859 // CHECK15-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
6860 // CHECK15-NEXT:  entry:
6861 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
6862 // CHECK15-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
6863 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
6864 // CHECK15-NEXT:    ret void
6865 //
6866 //
6867 // CHECK16-LABEL: define {{[^@]+}}@main
6868 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] {
6869 // CHECK16-NEXT:  entry:
6870 // CHECK16-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
6871 // CHECK16-NEXT:    [[G:%.*]] = alloca double, align 8
6872 // CHECK16-NEXT:    [[G1:%.*]] = alloca double*, align 4
6873 // CHECK16-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
6874 // CHECK16-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
6875 // CHECK16-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
6876 // CHECK16-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
6877 // CHECK16-NEXT:    [[VAR:%.*]] = alloca %struct.S*, align 4
6878 // CHECK16-NEXT:    [[TMP:%.*]] = alloca %struct.S*, align 4
6879 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
6880 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6881 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6882 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6883 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
6884 // CHECK16-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
6885 // CHECK16-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
6886 // CHECK16-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S], align 4
6887 // CHECK16-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S]], align 4
6888 // CHECK16-NEXT:    [[_TMP6:%.*]] = alloca %struct.S*, align 4
6889 // CHECK16-NEXT:    [[SVAR:%.*]] = alloca i32, align 4
6890 // CHECK16-NEXT:    [[I13:%.*]] = alloca i32, align 4
6891 // CHECK16-NEXT:    store i32 0, i32* [[RETVAL]], align 4
6892 // CHECK16-NEXT:    store double* [[G]], double** [[G1]], align 4
6893 // CHECK16-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
6894 // CHECK16-NEXT:    store i32 0, i32* [[T_VAR]], align 4
6895 // CHECK16-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6896 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i32 8, i1 false)
6897 // CHECK16-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6898 // CHECK16-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
6899 // CHECK16-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i32 1
6900 // CHECK16-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
6901 // CHECK16-NEXT:    store %struct.S* [[TEST]], %struct.S** [[VAR]], align 4
6902 // CHECK16-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6903 // CHECK16-NEXT:    store %struct.S* [[TMP1]], %struct.S** [[TMP]], align 4
6904 // CHECK16-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6905 // CHECK16-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6906 // CHECK16-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR]], align 4
6907 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6908 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
6909 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6910 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
6911 // CHECK16-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6912 // CHECK16-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
6913 // CHECK16-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
6914 // CHECK16:       arrayctor.loop:
6915 // CHECK16-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
6916 // CHECK16-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
6917 // CHECK16-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
6918 // CHECK16-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
6919 // CHECK16-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
6920 // CHECK16:       arrayctor.cont:
6921 // CHECK16-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP]], align 4
6922 // CHECK16-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]])
6923 // CHECK16-NEXT:    store %struct.S* [[VAR5]], %struct.S** [[_TMP6]], align 4
6924 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6925 // CHECK16:       omp.inner.for.cond:
6926 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6927 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
6928 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6929 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
6930 // CHECK16:       omp.inner.for.cond.cleanup:
6931 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
6932 // CHECK16:       omp.inner.for.body:
6933 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6934 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6935 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6936 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
6937 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !3
6938 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
6939 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP11]]
6940 // CHECK16-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !3
6941 // CHECK16-NEXT:    [[TMP12:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4, !llvm.access.group !3
6942 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
6943 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 [[TMP13]]
6944 // CHECK16-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX7]] to i8*
6945 // CHECK16-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP12]] to i8*
6946 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i32 4, i1 false), !llvm.access.group !3
6947 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6948 // CHECK16:       omp.body.continue:
6949 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6950 // CHECK16:       omp.inner.for.inc:
6951 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6952 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
6953 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
6954 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
6955 // CHECK16:       omp.inner.for.end:
6956 // CHECK16-NEXT:    store i32 2, i32* [[I]], align 4
6957 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
6958 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
6959 // CHECK16-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
6960 // CHECK16-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
6961 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 8, i1 false)
6962 // CHECK16-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6963 // CHECK16-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S]* [[S_ARR4]] to %struct.S*
6964 // CHECK16-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN9]], i32 2
6965 // CHECK16-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN9]], [[TMP21]]
6966 // CHECK16-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
6967 // CHECK16:       omp.arraycpy.body:
6968 // CHECK16-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6969 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN9]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
6970 // CHECK16-NEXT:    [[TMP22:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
6971 // CHECK16-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
6972 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i32 4, i1 false)
6973 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
6974 // CHECK16-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
6975 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
6976 // CHECK16-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
6977 // CHECK16:       omp.arraycpy.done10:
6978 // CHECK16-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[_TMP6]], align 4
6979 // CHECK16-NEXT:    [[TMP25:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
6980 // CHECK16-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[TMP24]] to i8*
6981 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 4, i1 false)
6982 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[SVAR]], align 4
6983 // CHECK16-NEXT:    store i32 [[TMP27]], i32* @_ZZ4mainE4svar, align 4
6984 // CHECK16-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]]
6985 // CHECK16-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR4]], i32 0, i32 0
6986 // CHECK16-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN11]], i32 2
6987 // CHECK16-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
6988 // CHECK16:       arraydestroy.body:
6989 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP28]], [[OMP_ARRAYCPY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
6990 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
6991 // CHECK16-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
6992 // CHECK16-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
6993 // CHECK16-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
6994 // CHECK16:       arraydestroy.done12:
6995 // CHECK16-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
6996 // CHECK16-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
6997 // CHECK16-NEXT:    [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
6998 // CHECK16-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN14]], i32 2
6999 // CHECK16-NEXT:    br label [[ARRAYDESTROY_BODY15:%.*]]
7000 // CHECK16:       arraydestroy.body15:
7001 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENTPAST16:%.*]] = phi %struct.S* [ [[TMP29]], [[ARRAYDESTROY_DONE12]] ], [ [[ARRAYDESTROY_ELEMENT17:%.*]], [[ARRAYDESTROY_BODY15]] ]
7002 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENT17]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST16]], i32 -1
7003 // CHECK16-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT17]]) #[[ATTR4]]
7004 // CHECK16-NEXT:    [[ARRAYDESTROY_DONE18:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT17]], [[ARRAY_BEGIN14]]
7005 // CHECK16-NEXT:    br i1 [[ARRAYDESTROY_DONE18]], label [[ARRAYDESTROY_DONE19:%.*]], label [[ARRAYDESTROY_BODY15]]
7006 // CHECK16:       arraydestroy.done19:
7007 // CHECK16-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
7008 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[RETVAL]], align 4
7009 // CHECK16-NEXT:    ret i32 [[TMP30]]
7010 //
7011 //
7012 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
7013 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
7014 // CHECK16-NEXT:  entry:
7015 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7016 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7017 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7018 // CHECK16-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
7019 // CHECK16-NEXT:    ret void
7020 //
7021 //
7022 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
7023 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7024 // CHECK16-NEXT:  entry:
7025 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7026 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
7027 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7028 // CHECK16-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
7029 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7030 // CHECK16-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
7031 // CHECK16-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
7032 // CHECK16-NEXT:    ret void
7033 //
7034 //
7035 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
7036 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7037 // CHECK16-NEXT:  entry:
7038 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7039 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7040 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7041 // CHECK16-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
7042 // CHECK16-NEXT:    ret void
7043 //
7044 //
7045 // CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
7046 // CHECK16-SAME: () #[[ATTR3:[0-9]+]] comdat {
7047 // CHECK16-NEXT:  entry:
7048 // CHECK16-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
7049 // CHECK16-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
7050 // CHECK16-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
7051 // CHECK16-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
7052 // CHECK16-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
7053 // CHECK16-NEXT:    [[VAR:%.*]] = alloca %struct.S.0*, align 4
7054 // CHECK16-NEXT:    [[TMP:%.*]] = alloca %struct.S.0*, align 4
7055 // CHECK16-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
7056 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7057 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7058 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7059 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
7060 // CHECK16-NEXT:    [[T_VAR2:%.*]] = alloca i32, align 4
7061 // CHECK16-NEXT:    [[VEC3:%.*]] = alloca [2 x i32], align 4
7062 // CHECK16-NEXT:    [[S_ARR4:%.*]] = alloca [2 x %struct.S.0], align 4
7063 // CHECK16-NEXT:    [[VAR5:%.*]] = alloca [[STRUCT_S_0]], align 4
7064 // CHECK16-NEXT:    [[_TMP6:%.*]] = alloca %struct.S.0*, align 4
7065 // CHECK16-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
7066 // CHECK16-NEXT:    store i32 0, i32* [[T_VAR]], align 4
7067 // CHECK16-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
7068 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
7069 // CHECK16-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
7070 // CHECK16-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
7071 // CHECK16-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
7072 // CHECK16-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
7073 // CHECK16-NEXT:    store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
7074 // CHECK16-NEXT:    [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
7075 // CHECK16-NEXT:    store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 4
7076 // CHECK16-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
7077 // CHECK16-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
7078 // CHECK16-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 4
7079 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7080 // CHECK16-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
7081 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7082 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7083 // CHECK16-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
7084 // CHECK16-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
7085 // CHECK16-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
7086 // CHECK16:       arrayctor.loop:
7087 // CHECK16-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
7088 // CHECK16-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
7089 // CHECK16-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
7090 // CHECK16-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
7091 // CHECK16-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
7092 // CHECK16:       arrayctor.cont:
7093 // CHECK16-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
7094 // CHECK16-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]])
7095 // CHECK16-NEXT:    store %struct.S.0* [[VAR5]], %struct.S.0** [[_TMP6]], align 4
7096 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7097 // CHECK16:       omp.inner.for.cond:
7098 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
7099 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
7100 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
7101 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
7102 // CHECK16:       omp.inner.for.cond.cleanup:
7103 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
7104 // CHECK16:       omp.inner.for.body:
7105 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
7106 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
7107 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7108 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
7109 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR2]], align 4, !llvm.access.group !7
7110 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
7111 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC3]], i32 0, i32 [[TMP11]]
7112 // CHECK16-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !7
7113 // CHECK16-NEXT:    [[TMP12:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4, !llvm.access.group !7
7114 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
7115 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 [[TMP13]]
7116 // CHECK16-NEXT:    [[TMP14:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
7117 // CHECK16-NEXT:    [[TMP15:%.*]] = bitcast %struct.S.0* [[TMP12]] to i8*
7118 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i32 4, i1 false), !llvm.access.group !7
7119 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7120 // CHECK16:       omp.body.continue:
7121 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7122 // CHECK16:       omp.inner.for.inc:
7123 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
7124 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
7125 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
7126 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
7127 // CHECK16:       omp.inner.for.end:
7128 // CHECK16-NEXT:    store i32 2, i32* [[I]], align 4
7129 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[T_VAR2]], align 4
7130 // CHECK16-NEXT:    store i32 [[TMP17]], i32* [[T_VAR]], align 4
7131 // CHECK16-NEXT:    [[TMP18:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
7132 // CHECK16-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[VEC3]] to i8*
7133 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 8, i1 false)
7134 // CHECK16-NEXT:    [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
7135 // CHECK16-NEXT:    [[TMP20:%.*]] = bitcast [2 x %struct.S.0]* [[S_ARR4]] to %struct.S.0*
7136 // CHECK16-NEXT:    [[TMP21:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i32 2
7137 // CHECK16-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN9]], [[TMP21]]
7138 // CHECK16-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE10:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
7139 // CHECK16:       omp.arraycpy.body:
7140 // CHECK16-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP20]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
7141 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN9]], [[OMP_INNER_FOR_END]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
7142 // CHECK16-NEXT:    [[TMP22:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]] to i8*
7143 // CHECK16-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]] to i8*
7144 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP22]], i8* align 4 [[TMP23]], i32 4, i1 false)
7145 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
7146 // CHECK16-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
7147 // CHECK16-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]]
7148 // CHECK16-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE10]], label [[OMP_ARRAYCPY_BODY]]
7149 // CHECK16:       omp.arraycpy.done10:
7150 // CHECK16-NEXT:    [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP6]], align 4
7151 // CHECK16-NEXT:    [[TMP25:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
7152 // CHECK16-NEXT:    [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
7153 // CHECK16-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP25]], i8* align 4 [[TMP26]], i32 4, i1 false)
7154 // CHECK16-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]]
7155 // CHECK16-NEXT:    [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR4]], i32 0, i32 0
7156 // CHECK16-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN11]], i32 2
7157 // CHECK16-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
7158 // CHECK16:       arraydestroy.body:
7159 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP27]], [[OMP_ARRAYCPY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
7160 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
7161 // CHECK16-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
7162 // CHECK16-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN11]]
7163 // CHECK16-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE12:%.*]], label [[ARRAYDESTROY_BODY]]
7164 // CHECK16:       arraydestroy.done12:
7165 // CHECK16-NEXT:    store i32 0, i32* [[RETVAL]], align 4
7166 // CHECK16-NEXT:    [[ARRAY_BEGIN13:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
7167 // CHECK16-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN13]], i32 2
7168 // CHECK16-NEXT:    br label [[ARRAYDESTROY_BODY14:%.*]]
7169 // CHECK16:       arraydestroy.body14:
7170 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENTPAST15:%.*]] = phi %struct.S.0* [ [[TMP28]], [[ARRAYDESTROY_DONE12]] ], [ [[ARRAYDESTROY_ELEMENT16:%.*]], [[ARRAYDESTROY_BODY14]] ]
7171 // CHECK16-NEXT:    [[ARRAYDESTROY_ELEMENT16]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST15]], i32 -1
7172 // CHECK16-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT16]]) #[[ATTR4]]
7173 // CHECK16-NEXT:    [[ARRAYDESTROY_DONE17:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT16]], [[ARRAY_BEGIN13]]
7174 // CHECK16-NEXT:    br i1 [[ARRAYDESTROY_DONE17]], label [[ARRAYDESTROY_DONE18:%.*]], label [[ARRAYDESTROY_BODY14]]
7175 // CHECK16:       arraydestroy.done18:
7176 // CHECK16-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
7177 // CHECK16-NEXT:    [[TMP29:%.*]] = load i32, i32* [[RETVAL]], align 4
7178 // CHECK16-NEXT:    ret i32 [[TMP29]]
7179 //
7180 //
7181 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
7182 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7183 // CHECK16-NEXT:  entry:
7184 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7185 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7186 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7187 // CHECK16-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
7188 // CHECK16-NEXT:    store float 0.000000e+00, float* [[F]], align 4
7189 // CHECK16-NEXT:    ret void
7190 //
7191 //
7192 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
7193 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7194 // CHECK16-NEXT:  entry:
7195 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7196 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7197 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7198 // CHECK16-NEXT:    ret void
7199 //
7200 //
7201 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
7202 // CHECK16-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7203 // CHECK16-NEXT:  entry:
7204 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
7205 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
7206 // CHECK16-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
7207 // CHECK16-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
7208 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
7209 // CHECK16-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
7210 // CHECK16-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
7211 // CHECK16-NEXT:    store float [[TMP0]], float* [[F]], align 4
7212 // CHECK16-NEXT:    ret void
7213 //
7214 //
7215 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
7216 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7217 // CHECK16-NEXT:  entry:
7218 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7219 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7220 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7221 // CHECK16-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
7222 // CHECK16-NEXT:    ret void
7223 //
7224 //
7225 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
7226 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7227 // CHECK16-NEXT:  entry:
7228 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7229 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
7230 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7231 // CHECK16-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
7232 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7233 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
7234 // CHECK16-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
7235 // CHECK16-NEXT:    ret void
7236 //
7237 //
7238 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
7239 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7240 // CHECK16-NEXT:  entry:
7241 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7242 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7243 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7244 // CHECK16-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
7245 // CHECK16-NEXT:    ret void
7246 //
7247 //
7248 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
7249 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7250 // CHECK16-NEXT:  entry:
7251 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7252 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7253 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7254 // CHECK16-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
7255 // CHECK16-NEXT:    store i32 0, i32* [[F]], align 4
7256 // CHECK16-NEXT:    ret void
7257 //
7258 //
7259 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
7260 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7261 // CHECK16-NEXT:  entry:
7262 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7263 // CHECK16-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
7264 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7265 // CHECK16-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
7266 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7267 // CHECK16-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
7268 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
7269 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
7270 // CHECK16-NEXT:    ret void
7271 //
7272 //
7273 // CHECK16-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
7274 // CHECK16-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
7275 // CHECK16-NEXT:  entry:
7276 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
7277 // CHECK16-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
7278 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
7279 // CHECK16-NEXT:    ret void
7280 //
7281